Python keras module: backend() code examples

The code examples below, extracted from open-source Python projects, illustrate how to use keras.backend()

Project: ZOO-Attack | Author: huanzhang12
def setup_tutorial():
    """
    Helper function to check correct configuration of tf and keras for tutorial
    :return: True if setup checks completed
    """

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    if not hasattr(backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")

    # Image dimension ordering should follow the TensorFlow convention
    if keras.backend.image_dim_ordering() != 'tf':
        keras.backend.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' "
              "to 'th', temporarily setting to 'tf'")

    return True
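A minimal driver for the helper above might look like the following sketch; the imports mirror the TF1-era environment the snippet assumes, and the explicit session binding is illustrative, not part of the original project:

import tensorflow as tf
import keras
from keras import backend

if setup_tutorial():
    # Bind an explicit session so Keras layers and hand-written TF ops
    # share one graph (TF1-style session handling).
    sess = tf.Session()
    keras.backend.set_session(sess)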
Project: ktorch | Author: farizrahman4u
def variable(value, dtype=None, name=None, constraint=None):
    if isinstance(value, Tensor):
        value = value.value
    if isinstance(value, torch.autograd.Variable):
        value = value.data
    if 'torch' in str(type(value)):
        value = value.numpy()
    name = _prepare_name(name, 'variable')
    if dtype is None:
        dtype = keras.backend.floatx()
    if value.dtype != dtype:
        value = np.cast[dtype](value)
    torch_tensor = torch.from_numpy(value)
    torch_variable = torch.autograd.Variable(torch_tensor, requires_grad=True)
    ktorch_variable = Variable(torch_variable, name=name)
    ktorch_variable.constraint = None
    make_keras_tensor(ktorch_variable)
    return ktorch_variable
Project: ktorch | Author: farizrahman4u
def constant(value, dtype=None, shape=None, name=None):
    value = np.array(value)
    name = _prepare_name(name, 'constant')
    if dtype is None:
        dtype = keras.backend.floatx()
    if value.dtype != dtype:
        value = np.cast[dtype](value)
    if value.shape == ():
        if shape is None:
            shape = ()
        value = np.ones(shape) * value
    torch_tensor = torch.from_numpy(value)
    torch_variable = torch.autograd.Variable(torch_tensor, requires_grad=False)
    ktorch_variable = Variable(torch_variable, name=name)
    make_keras_tensor(ktorch_variable)
    return ktorch_variable
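A quick sketch of how the two shims above compose, assuming the ktorch module defining variable and constant is in scope:

import numpy as np

# Wrap a NumPy array as a trainable ktorch Variable (requires_grad=True)...
w = variable(np.arange(6, dtype='float32').reshape(2, 3), name='weights')
# ...and broadcast a scalar into a fixed-value constant of the same shape.
b = constant(0.0, shape=(2, 3))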
Project: DeepJet | Author: mstoye
def saveModel(self, outfile):
        self.keras_model.save(self.outputDir + outfile)
        import tensorflow as tf
        import keras.backend as K
        tfsession = K.get_session()
        saver = tf.train.Saver()
        tfoutpath = self.outputDir + outfile + '_tfsession/tf'
        import os
        os.system('rm -rf ' + tfoutpath)
        os.system('mkdir -p ' + tfoutpath)
        saver.save(tfsession, tfoutpath)


        #import h5py
        #f = h5py.File(self.outputDir+outfile, 'r+')
        #del f['optimizer_weights']
        #f.close()
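saveModel writes both the Keras HDF5 file and a raw TF checkpoint; a hypothetical restore counterpart (the function name and argument are illustrative, and tf.train.Saver requires the graph variables to already exist) could be:

def restore_tf_session(tfoutpath):
    # Reload the TF checkpoint written by saveModel above (sketch only).
    import tensorflow as tf
    import keras.backend as K
    saver = tf.train.Saver()
    saver.restore(K.get_session(), tfoutpath)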
Project: python-alp | Author: tboquet
def test_experiment_instance_utils(self, get_model):
        new_session()
        model = get_model()

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        expe = Experiment(model)
        expe.model_dict = model
        expe.backend_name = 'another_backend'
        expe.model_dict = model

        assert expe.backend is not None
        expe = Experiment()

        print(self)
Project: python-alp | Author: tboquet
def test_experiment_generator_setups(self, get_generators):
        gen_t, data_t, d_stream_t, gen, data, d_stream, nb = get_generators
        nb_train, nb_val = nb
        test_model = model()

        test_model.compile(loss='binary_crossentropy',
                           optimizer='rmsprop')
        expe = Experiment(test_model)
        expe.fit_gen([gen_t], [gen], nb_epoch=2,
                     samples_per_epoch=nb_train,
                     nb_val_samples=nb_val,
                     verbose=2, overwrite=True)
        close_gens(gen_t, data_t, d_stream_t)
        close_gens(gen, data, d_stream)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp | Author: tboquet
def test_build_predict_func(self, get_model):
        """Test the build of a model"""
        new_session()
        X_tr = np.ones((train_samples, input_dim))
        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_name = model.__class__.__name__

        pred_func = KTB.build_predict_func(model)

        tensors = [X_tr]
        if model_name != 'Model':
            tensors.append(1.)

        res = pred_func(tensors)

        assert len(res[0]) == len(X_tr)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp | Author: tboquet
def test_fit(self, get_model):
        "Test the training of a serialized model"
        new_session()
        data, data_val = make_data(train_samples, test_samples)

        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_dict = dict()
        model_dict['model_arch'] = to_dict_w_opt(model)

        res = KTB.train(copy.deepcopy(model_dict['model_arch']), [data],
                        [data_val], [])
        res = KTB.fit(NAME, VERSION, model_dict, [data], 'test', [data_val],
                      [])

        assert len(res) == 4

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: mimic3-benchmarks | Author: YerevaNN
def call(self, x, mask=None):
        if K.backend() == 'tensorflow':
            xt = tf.transpose(x, perm=(2, 0, 1))
            gt = tf.gather(xt, self.indices)
            return tf.transpose(gt, perm=(1, 2, 0))
        return x[:, :, self.indices]
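The transpose/gather/transpose sequence in the TensorFlow branch computes the same thing as the fancy indexing in the fallback branch, which NumPy can confirm:

import numpy as np

x = np.random.rand(4, 10, 8)            # (batch, time, features)
indices = [0, 2, 5]
gathered = np.transpose(np.transpose(x, (2, 0, 1))[indices], (1, 2, 0))
assert np.allclose(gathered, x[:, :, indices])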
Project: cleverhans | Author: tensorflow
def data_cifar10():
    """
    Preprocess CIFAR10 dataset
    :return:
    """

    # These values are specific to CIFAR10
    img_rows = 32
    img_cols = 32
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
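Typical use of the loader above (a sketch, assuming the module's cifar10 and np_utils imports; shapes shown for the 'tf' ordering):

X_train, Y_train, X_test, Y_test = data_cifar10()
print(X_train.shape)  # (50000, 32, 32, 3) under 'tf' ordering
print(Y_train.shape)  # (50000, 10) one-hot labels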
Project: keras | Author: GeekLiB
def do_sparse():
    return K == KTF or KTH.th_sparse_module
Project: gandlf | Author: codekansas
def _sort_weights_by_name(self, weights):
        """Sorts weights by name and returns them."""

        if not weights:
            return []

        if K.backend() == 'theano':
            key = lambda x: x.name if x.name else x.auto_name
        else:
            key = lambda x: x.name

        weights.sort(key=key)
        return weights
Project: aetros-cli | Author: aetros
def on_registration(self, params):
        if not self.registered:
            self.registered = True

            if self.is_master_process():
                self.logger.info("Job %s/%s started." % (self.model_name, self.job_id))
                self.logger.info("Open http://%s/model/%s/job/%s to monitor it." % (self.host, self.model_name, self.job_id))

            self.logger.debug('Git backend start')
            self.git.start()
        else:
            self.logger.info("Successfully reconnected.")
Project: aetros-cli | Author: aetros
def on_signusr1(self, signal, frame):
        self.logger.warning("USR1: backend job_id=%s (running=%s, ended=%s), client (online=%s, active=%s, registered=%s, "
                            "connected=%s, queue=%d), git (online=%s, active_thread=%s, last_push_time=%s)." % (
          str(self.job_id),
          str(self.running),
          str(self.ended),
          str(self.client.online),
          str(self.client.active),
          str(self.client.registered),
          str(self.client.connected),
          len(self.client.queue),
          str(self.git.online),
          str(self.git.active_thread),
          str(self.git.last_push_time),
        ))
Project: aetros-cli | Author: aetros
def is_master_process(self):
        """
        Master means that aetros.backend.start_job() has been called without using the command `aetros start`.
        If master is true, we collect and track some data that usually `aetros start` would do and reset the job's
        temp files on the server.
        :return:
        """

        return os.getenv('AETROS_JOB_ID') is None
Project: aetros-cli | Author: aetros
def sync_weights(self, push=True):

        if not os.path.exists(self.get_job_model().get_weights_filepath_latest()):
            return

        self.logger.debug("sync weights...")
        self.set_status('SYNC WEIGHTS', add_section=False)

        with open(self.get_job_model().get_weights_filepath_latest(), 'rb') as f:
            import keras.backend
            self.git.commit_file('Added weights', 'aetros/weights/latest.hdf5', f.read())

            image_data_format = None
            if hasattr(keras.backend, 'set_image_data_format'):
                image_data_format = keras.backend.image_data_format()

            info = {
                'framework': 'keras',
                'backend': keras.backend.backend(),
                'image_data_format': image_data_format
            }
            self.git.commit_file('Added weights', 'aetros/weights/latest.json', json.dumps(info))
            if push:
                self.git.push()

        # todo, implement optional saving of self.get_job_model().get_weights_filepath_best()
Project: aetros-cli | Author: aetros
def start_keras(logger, job_backend):
    if 'KERAS_BACKEND' not in os.environ:
        os.environ['KERAS_BACKEND'] = 'tensorflow'

    from . import keras_model_utils

    # we need to import keras here, so we know which backend is used (and whether GPU is used)
    os.chdir(job_backend.git.work_tree)
    logger.debug("Start simple model")

    # we use the source from the job commit directly
    with job_backend.git.batch_commit('Git Version'):
        job_backend.set_system_info('git_remote_url', job_backend.git.get_remote_url('origin'))
        job_backend.set_system_info('git_version', job_backend.git.job_id)

    # all our shapes are Tensorflow schema. (height, width, channels)
    import keras.backend
    if hasattr(keras.backend, 'set_image_dim_ordering'):
        keras.backend.set_image_dim_ordering('tf')

    if hasattr(keras.backend, 'set_image_data_format'):
        keras.backend.set_image_data_format('channels_last')

    from .KerasCallback import KerasCallback
    trainer = Trainer(job_backend)
    keras_logger = KerasCallback(job_backend, job_backend.logger)

    job_backend.progress(0, job_backend.job['config']['epochs'])

    logger.info("Start training")
    keras_model_utils.job_start(job_backend, trainer, keras_logger)

    job_backend.done()
Project: FeatureSqueezing | Author: QData
def tf_model_eval_distance(sess, x, model1, model2, X_test):
    """
    Compute the L1 distance between predictions on original and squeezed data.
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param model1: model output original predictions
    :param model2: model output squeezed predictions
    :param X_test: numpy array with training inputs
    :return: a float vector with the distance value
    """
    # Define symbolic expression for accuracy
    # acc_value = keras.metrics.categorical_accuracy(y, model)

    l2_diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(model1, model2)),
                                    axis=1))
    l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)

    l1_dist_vec = np.zeros((len(X_test)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test), start + FLAGS.batch_size)
            cur_batch_size = end - start

            l1_dist_vec[start:end] = l1_diff.eval(feed_dict={x: X_test[start:end], keras.backend.learning_phase(): 0})

        assert end >= len(X_test)
    return l1_dist_vec
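Note that tf.sub was renamed tf.subtract in TensorFlow 1.0; the same three distance ops in the post-1.0 API would look like this sketch, with model1/model2 standing for the prediction tensors:

import tensorflow as tf

def prediction_distances(model1, model2):
    # Per-example L1, L2 and L-infinity distances between two prediction tensors.
    diff = tf.subtract(model1, model2)
    l1 = tf.reduce_sum(tf.abs(diff), axis=1)
    l2 = tf.sqrt(tf.reduce_sum(tf.square(diff), axis=1))
    l_inf = tf.reduce_max(tf.abs(diff), axis=1)
    return l1, l2, l_inf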
Project: keras-customized | Author: ambrite
def do_sparse():
    return K == KTF or KTH.th_sparse_module
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def data_cifar10():
    """
        Preprocess CIFAR10 dataset
        :return:
        """

    # These values are specific to CIFAR10
    img_rows = 32
    img_cols = 32
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test


#conv_2d
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def data_cifar10():
    """
    Preprocess CIFAR10 dataset
    :return:
    """

    # These values are specific to CIFAR10
    img_rows = 32
    img_cols = 32
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    np.save("cifar10_legitimate.npy",X_test)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def data_stl10():
    """
    Preprocess CIFAR10 dataset
    :return:
    """

    # These values are specific to CIFAR10
    img_rows = 96
    img_cols = 96
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    #(X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = np.load('x_stl10_train.npy')
    y_train = np.load('y_stl10_train.npy') - 1
    X_test = np.load('x_stl10_test.npy')
    y_test = np.load('y_stl10_test.npy') - 1
    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    # np.save("cifar10_legitimate.npy",X_test)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def data_stl10():
    """
        Preprocess STL dataset
        :return:
        """

    # These values are specific to CIFAR10
    img_rows = 96
    img_cols = 96
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    #(X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = np.load('x_stl10_train.npy')
    y_train = np.load('y_stl10_train.npy') - 1
    X_test = np.load('x_stl10_test.npy')
    y_test = np.load('y_stl10_test.npy') - 1
    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    # np.save("cifar10_legitimate.npy",X_test)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test


#getting the grid visualization
Project: ZOO-Attack | Author: huanzhang12
def data_cifar10():
    """
    Preprocess CIFAR10 dataset
    :return:
    """

    # These values are specific to CIFAR10
    img_rows = 32
    img_cols = 32
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Project: ZOO-Attack | Author: huanzhang12
def substitute_model(img_rows=28, img_cols=28, nb_classes=10):
    """
    Defines the model architecture to be used by the substitute
    :param img_rows: number of rows in input
    :param img_cols: number of columns in input
    :param nb_classes: number of classes in output
    :return: keras model
    """
    model = Sequential()

    # Find out the input shape ordering
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (1, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, 1)

    # Define a fully connected model (it's different from the black-box)
    layers = [Flatten(input_shape=input_shape),
              Dense(200),
              Activation('relu'),
              Dropout(0.5),
              Dense(200),
              Activation('relu'),
              Dropout(0.5),
              Dense(nb_classes),
              Activation('softmax')]

    for layer in layers:
        model.add(layer)

    return model
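Compiling the substitute as in a typical black-box setup (the optimizer choice here is illustrative, not from the project):

model = substitute_model()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])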
Project: keras | Author: NVIDIA
def do_sparse():
    return K == KTF or KTH.th_sparse_module
Project: DeepIV | Author: jhartford
def feature_to_image(features, height=28, width=28, channels=1, backend=K):
    '''
    Reshape a flattened image to the input format for convolutions.

    Can be used either as a Keras operation using the default backend or
    with numpy by using the argument backend=np

    Conforms to the image data format setting defined in ~/.keras/keras.json
    '''
    if K.image_data_format() == "channels_first":
        return backend.reshape(features, (-1, channels, height, width))
    else:
        return backend.reshape(features, (-1, height, width, channels))
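As the docstring notes, the same function works on plain NumPy arrays by passing backend=np (a sketch; keras must still be importable for the data-format lookup):

import numpy as np

flat = np.zeros((2, 28 * 28), dtype='float32')
img = feature_to_image(flat, backend=np)
# img.shape is (2, 28, 28, 1) for 'channels_last', (2, 1, 28, 28) otherwise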
Project: FeatureSqueezing | Author: uvasrg
def tf_model_eval_distance(sess, x, model1, model2, X_test):
    """
    Compute the L1 distance between predictions on original and squeezed data.
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param model1: model output original predictions
    :param model2: model output squeezed predictions
    :param X_test: numpy array with training inputs
    :return: a float vector with the distance value
    """
    # Define symbolic expression for accuracy
    # acc_value = keras.metrics.categorical_accuracy(y, model)

    l2_diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(model1, model2)),
                                    axis=1))
    l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)

    l1_dist_vec = np.zeros((len(X_test)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test), start + FLAGS.batch_size)
            cur_batch_size = end - start

            l1_dist_vec[start:end] = l1_diff.eval(feed_dict={x: X_test[start:end], keras.backend.learning_phase(): 0})

        assert end >= len(X_test)
    return l1_dist_vec
Project: ktorch | Author: farizrahman4u
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    name = _prepare_name(name, 'placeholder')
    if sparse:
        raise Exception('Sparse tensors are not supported yet :( ')
    if dtype is None:
        dtype = keras.backend.floatx()
    ktorch_tensor = Tensor(name=name, shape=shape, ndim=ndim, dtype=dtype)
    make_keras_tensor(ktorch_tensor)
    ktorch_tensor._ktorch_placeholder = True
    return ktorch_tensor
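A hypothetical call to the shim above:

inp = placeholder(shape=(None, 32), dtype='float32', name='inp')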
Project: dem | Author: hengyuan-hu
def decode(y, relu_max):
    print('decoder input shape:', y._keras_shape)
    assert len(y._keras_shape) == 2
    if relu_max:
        x = GaussianNoise(0.2)(y)
        # x = Activation(utils.relu_n(1))(x)
    else:
        x = y

    x = Reshape((1, 1, LATENT_DIM))(x)
    # 1, 1, LATENT_DIM
    if relu_max:
        print('in decode: relu_max:', relu_max)
        x = Activation(utils.scale_up(relu_max))(x)
    # x = BN(mode=2, axis=3)(x) # this bn seems not good? NOT VERIFIED

    # why use 512 instead of 256 here?
    batch_size = keras.backend.shape(x)[0]
    x = Deconv2D(512, 4, 4, output_shape=[batch_size, 4, 4, 512],
                 activation='relu', border_mode='same', subsample=(4,4))(x)
    x = BN(mode=2, axis=3)(x)
    # 4, 4, 512
    x = Deconv2D(256, 5, 5, output_shape=[batch_size, 8, 8, 256],
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 8, 8, 256
    x = Deconv2D(128, 5, 5, output_shape=(batch_size, 16, 16, 128),
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 16, 16, 256
    x = Deconv2D(64, 5, 5, output_shape=(batch_size, 32, 32, 64),
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 32, 32, 64
    x = Deconv2D(3, 5, 5, output_shape=(batch_size, 32, 32, 3),
                 activation='linear', border_mode='same', subsample=(1,1))(x)
    # 32, 32, 3
    x = BN(mode=2, axis=3)(x)
    return x
Project: python-alp | Author: tboquet
def new_session():
    if K.backend() == 'tensorflow':  # pragma: no cover
        import tensorflow as tf
        K.clear_session()
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)
        K.set_session(session)
Project: python-alp | Author: tboquet
def get_loss():
    def return_loss():
        import keras.backend as K
        def cat_cross(y_true, y_pred):
            '''A test of custom loss function
            '''
            return K.categorical_crossentropy(y_pred, y_true)
        return cat_cross
    return return_loss
Project: python-alp | Author: tboquet
def get_metric():
    def return_metric():
        import keras.backend as K
        def cosine_proximity(y_true, y_pred):
            y_true = K.l2_normalize(y_true, axis=-1)
            y_pred = K.l2_normalize(y_pred, axis=-1)
            return -K.mean(y_true * y_pred)
        return cosine_proximity
    return return_metric
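Both factories above return a zero-argument function that builds the actual objective, so each has to be unwrapped once before compile(); the model here is a stand-in:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(3, input_dim=4, activation='softmax')])
model.compile(optimizer='sgd',
              loss=get_loss()(),          # -> cat_cross
              metrics=[get_metric()()])   # -> cosine_proximity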
Project: python-alp | Author: tboquet
def test_experiment_fit(self, get_model, get_loss_metric,
                            get_custom_l, get_callback_fix):
        new_session()
        data, data_val = make_data(train_samples, test_samples)
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        expe = Experiment(model)

        for mod in [None, model]:
            for data_val_loc in [None, data_val]:
                expe.fit([data], [data_val_loc], model=mod, nb_epoch=2,
                         batch_size=batch_size, metrics=metrics,
                         custom_objects=cust_objects, overwrite=True,
                         callbacks=get_callback_fix)

        expe.backend_name = 'another_backend'
        expe.load_model()
        expe.load_model(expe.mod_id, expe.data_id)

        assert expe.data_id is not None
        assert expe.mod_id is not None
        assert expe.params_dump is not None

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp | Author: tboquet
def test_experiment_fit_gen(self, get_model, get_loss_metric,
                                get_custom_l, get_callback_fix):
        new_session()
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        model_name = model.__class__.__name__
        _, data_val_use = make_data(train_samples, test_samples)
        expe = Experiment(model)

        for val in [1, data_val_use]:
            gen, data, data_stream = make_gen(batch_size)
            if val == 1:
                val, data_2, data_stream_2 = make_gen(batch_size)
            expe.fit_gen([gen], [val], nb_epoch=2,
                         model=model,
                         metrics=metrics,
                         custom_objects=cust_objects,
                         samples_per_epoch=64,
                         nb_val_samples=128,
                         verbose=2, overwrite=True,
                         callbacks=get_callback_fix)

            close_gens(gen, data, data_stream)
            if val == 1:
                close_gens(val, data_2, data_stream_2)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp | Author: tboquet
def test_experiment_fit_gen_async(self, get_model, get_loss_metric,
                                      get_custom_l):
        new_session()
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        _, data_val_use = make_data(train_samples, test_samples)
        expe = Experiment(model)

        expected_value = 2
        for val in [None, 1, data_val_use]:
            gen, data, data_stream = make_gen(batch_size)
            if val == 1:
                val, data_2, data_stream_2 = make_gen(batch_size)
            _, thread = expe.fit_gen_async([gen], [val], nb_epoch=2,
                                           model=model,
                                           metrics=metrics,
                                           custom_objects=cust_objects,
                                           samples_per_epoch=64,
                                           nb_val_samples=128,
                                           verbose=2, overwrite=True)

            thread.join()

            for k in expe.full_res['metrics']:
                if 'iter' not in k:
                    assert len(
                        expe.full_res['metrics'][k]) == expected_value

            close_gens(gen, data, data_stream)
            if val == 1:
                close_gens(val, data_2, data_stream_2)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp | Author: tboquet
def test_deserialization(self):
        new_session()
        model = sequential()
        model.compile(optimizer='sgd', loss='categorical_crossentropy')
        ser_mod = to_dict_w_opt(model)
        custom_objects = {'test_loss': [1, 2]}
        custom_objects = {k: serialize(custom_objects[k])
                          for k in custom_objects}
        model_from_dict_w_opt(ser_mod, custom_objects=custom_objects)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: FeatureSqueezing | Author: QData
def tf_model_eval_distance_dual_input(sess, x, model, X_test1, X_test2):
    """
    Compute the L1 distance between prediction of original and squeezed data.
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param model: model output predictions
    :param X_test: numpy array with training inputs
    :param Y_test: numpy array with training outputs
    :return: a float with the accuracy value
    """
    # Define sympbolic for accuracy
    # acc_value = keras.metrics.categorical_accuracy(y, model)

    # l2_diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(model1, model2)),
    #                                 axis=1))
    # l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    # l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)

    l1_dist_vec = np.zeros((len(X_test1)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test1)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test1)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test1), start + FLAGS.batch_size)
            cur_batch_size = end - start

            pred_1 = model.eval(feed_dict={x: X_test1[start:end], keras.backend.learning_phase(): 0})
            pred_2 = model.eval(feed_dict={x: X_test2[start:end], keras.backend.learning_phase(): 0})

            l1_dist_vec[start:end] = np.sum(np.abs(pred_1 - pred_2), axis=1)
        assert end >= len(X_test1)

    return l1_dist_vec
Project: FeatureSqueezing | Author: QData
def tf_model_eval_dist_tri_input(sess, x, model, X_test1, X_test2, X_test3, mode='max'):
    """
    Compute the accuracy of a TF model on some data
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param model: model output predictions
    :param X_test[1,2,3]: numpy array with testing inputs
    :param Y_test: numpy array with training outputs
    :return: a float with the accuracy value
    """

    l1_dist_vec = np.zeros((len(X_test1)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test1)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test1)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test1), start + FLAGS.batch_size)
            cur_batch_size = end - start

            pred_1 = model.eval(feed_dict={x: X_test1[start:end], keras.backend.learning_phase(): 0})
            pred_2 = model.eval(feed_dict={x: X_test2[start:end], keras.backend.learning_phase(): 0})
            pred_3 = model.eval(feed_dict={x: X_test3[start:end], keras.backend.learning_phase(): 0})

            l11 = np.sum(np.abs(pred_1 - pred_2), axis=1)
            l12 = np.sum(np.abs(pred_1 - pred_3), axis=1)
            l13 = np.sum(np.abs(pred_2 - pred_3), axis=1)

            if mode == 'max':
                l1_dist_vec[start:end] = np.max(np.array([l11, l12, l13]), axis=0)
            elif mode == 'mean':
                l1_dist_vec[start:end] = np.mean(np.array([l11, l12, l13]), axis=0)
        assert end >= len(X_test1)

        # Divide by number of examples to get final value

    return l1_dist_vec
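The mode switch reduces the three pairwise L1 distances per example; in isolation:

import numpy as np

l11, l12, l13 = np.array([0.1, 0.4]), np.array([0.2, 0.1]), np.array([0.3, 0.2])
stacked = np.array([l11, l12, l13])
print(np.max(stacked, axis=0))   # [0.3 0.4]
print(np.mean(stacked, axis=0))  # [0.2 0.23333333]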
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
              channels=1, nb_filters=64, nb_classes=10):
    """
        Defines a CNN model using Keras sequential model
        :param logits: If set to False, returns a Keras model, otherwise will also
        return logits tensor
        :param input_ph: The TensorFlow tensor for the input
        (needed if returning logits)
        ("ph" stands for placeholder but it need not actually be a
        placeholder)
        :param img_rows: number of row in the image
        :param img_cols: number of columns in the image
        :param channels: number of color channels (e.g., 1 for MNIST)
        :param nb_filters: number of convolutional filters per layer
        :param nb_classes: the number of output classes
        :return:
        """
    model = Sequential()

    # Define the layers successively (convolution layers are version dependent)
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (channels, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, channels)

    layers = [Dropout(0.2, input_shape=input_shape),
              conv_2d(nb_filters, (8, 8), (2, 2), "same"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
              Activation('relu'),
              Dropout(0.5),
              Flatten(),
              Dense(nb_classes)]

    for layer in layers:
        model.add(layer)

    if logits:
        logits_tensor = model(input_ph)
    model.add(Activation('softmax'))

    if logits:
        return model, logits_tensor
    else:
        return model
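Requesting the logits alongside the model, as the logits flag allows (a sketch using a TF1 placeholder; it assumes the module's conv_2d helper and Keras layers are in scope):

import tensorflow as tf

x_ph = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
model, logits_tensor = cnn_model(logits=True, input_ph=x_ph)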
Project: FeatureSqueezing | Author: uvasrg
def tf_model_eval_distance_dual_input(sess, x, model, X_test1, X_test2):
    """
    Compute the L1 distance between prediction of original and squeezed data.
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param model: model output predictions
    :param X_test: numpy array with training inputs
    :param Y_test: numpy array with training outputs
    :return: a float with the accuracy value
    """
    # Define sympbolic for accuracy
    # acc_value = keras.metrics.categorical_accuracy(y, model)

    # l2_diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(model1, model2)),
    #                                 axis=1))
    # l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    # l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)

    l1_dist_vec = np.zeros((len(X_test1)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test1)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test1)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test1), start + FLAGS.batch_size)
            cur_batch_size = end - start

            pred_1 = model.eval(feed_dict={x: X_test1[start:end], keras.backend.learning_phase(): 0})
            pred_2 = model.eval(feed_dict={x: X_test2[start:end], keras.backend.learning_phase(): 0})

            l1_dist_vec[start:end] = np.sum(np.abs(pred_1 - pred_2), axis=1)
        assert end >= len(X_test1)

    return l1_dist_vec
Project: FeatureSqueezing | Author: uvasrg
def tf_model_eval_dist_tri_input(sess, x, model, X_test1, X_test2, X_test3, mode='max'):
    """
    Compute the accuracy of a TF model on some data
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param model: model output predictions
    :param X_test[1,2,3]: numpy array with testing inputs
    :param Y_test: numpy array with training outputs
    :return: a float with the accuracy value
    """

    l1_dist_vec = np.zeros((len(X_test1)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test1)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test1)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test1), start + FLAGS.batch_size)
            cur_batch_size = end - start

            pred_1 = model.eval(feed_dict={x: X_test1[start:end], keras.backend.learning_phase(): 0})
            pred_2 = model.eval(feed_dict={x: X_test2[start:end], keras.backend.learning_phase(): 0})
            pred_3 = model.eval(feed_dict={x: X_test3[start:end], keras.backend.learning_phase(): 0})

            l11 = np.sum(np.abs(pred_1 - pred_2), axis=1)
            l12 = np.sum(np.abs(pred_1 - pred_3), axis=1)
            l13 = np.sum(np.abs(pred_2 - pred_3), axis=1)

            if mode == 'max':
                l1_dist_vec[start:end] = np.max(np.array([l11, l12, l13]), axis=0)
            elif mode == 'mean':
                l1_dist_vec[start:end] = np.mean(np.array([l11, l12, l13]), axis=0)
        assert end >= len(X_test1)

        # Divide by number of examples to get final value

    return l1_dist_vec
Project: ktorch | Author: farizrahman4u
def bias_add(x, bias, data_format=None):
    def _bias_add(X, data_format):
        x, bias = X
        from keras.backend import image_data_format, ndim, reshape
        if data_format is None:
            data_format = image_data_format()
        if data_format not in {'channels_first', 'channels_last'}:
            raise ValueError('Unknown data_format ' + str(data_format))
        if ndim(bias) != 1 and ndim(bias) != ndim(x) - 1:
            raise ValueError('Unexpected bias dimensions %d, '
                             'expect to be 1 or %d dimensions'
                             % (ndim(bias), ndim(x) - 1))
        bias_shape = tuple(bias.size())
        ndim_x = len(x.size())
        ndim_bias = len(bias_shape)
        if ndim_x == 5:
            if data_format == 'channels_first':
                if ndim_bias == 1:
                    bias = reshape(bias, (1, bias_shape[0], 1, 1, 1))
                else:
                    bias = reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
            elif data_format == 'channels_last':
                if ndim_bias == 1:
                    bias = reshape(bias, (1, 1, 1, 1, bias_shape[0]))
                else:
                    bias = reshape(bias, (1,) + bias_shape)
        elif ndim_x == 4:
            if data_format == 'channels_first':
                if ndim_bias == 1:
                    bias = reshape(bias, (1, bias_shape[0], 1, 1))
                else:
                    bias = reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
            elif data_format == 'channels_last':
                if ndim_bias == 1:
                    bias = reshape(bias, (1, 1, 1, bias_shape[0]))
                else:
                    bias = reshape(bias, (1,) + bias_shape)
        elif ndim_x == 3:
            if data_format == 'channels_first':
                if ndim_bias == 1:
                    bias = reshape(bias, (1, bias_shape[0], 1))
                else:
                    bias = reshape(bias, (1, bias_shape[1], bias_shape[0]))
            elif data_format == 'channels_last':
                if ndim_bias == 1:
                    bias = reshape(bias, (1, 1, bias_shape[0]))
                else:
                    bias = reshape(bias, (1,) + bias_shape)
        return x.add(bias.expand_as(x))

    def _compute_output_shape(X):
        return _get_shape(X[0])

    return get_op(_bias_add, output_shape=_compute_output_shape, arguments=[data_format])([x, bias])
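The reshape rules above just set up broadcasting; for a 4D channels_first input they amount to this NumPy identity:

import numpy as np

x = np.zeros((2, 16, 8, 8))            # (batch, channels, height, width)
bias = np.ones(16)
out = x + bias.reshape((1, 16, 1, 1))  # what _bias_add does for ndim_x == 4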
Project: dem | Author: hengyuan-hu
def decode(y, relu_max):
    print('decoder input shape:', y._keras_shape)
    assert len(y._keras_shape) == 2
    if relu_max:
        x = GaussianNoise(0.2)(y)
        x = Activation(utils.relu_n(1))(x)
    else:
        x = y

    x = Reshape((1, 1, LATENT_DIM))(x)
    # 1, 1, LATENT_DIM
    if relu_max:
        print('in decode: relu_max:', relu_max)
        x = Activation(utils.scale_up(relu_max))(x)
    # x = BN(mode=2, axis=3)(x) # this bn seems not good? NOT VERIFIED

    # why use 512 instead of 256 here?
    batch_size = keras.backend.shape(x)[0]
    x = Deconv2D(512, 6, 6, output_shape=[batch_size, 6, 6, 512],
                 activation='relu', border_mode='same', subsample=(6,6))(x)
    x = BN(mode=2, axis=3)(x)
    # 6, 6, 512
    x = Deconv2D(256, 5, 5, output_shape=[batch_size, 12, 12, 256],
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 12, 12, 256
    x = Deconv2D(128, 5, 5, output_shape=(batch_size, 24, 24, 128),
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 24, 24, 128
    x = Deconv2D(64, 5, 5, output_shape=(batch_size, 48, 48, 64),
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 48, 48, 64
    x = Deconv2D(32, 5, 5, output_shape=(batch_size, 96, 96, 32),
                 activation='relu', border_mode='same', subsample=(2,2))(x)
    x = BN(mode=2, axis=3)(x)
    # 96, 96, 32
    x = Deconv2D(3, 5, 5, output_shape=(batch_size, 96, 96, 3),
                 activation='linear', border_mode='same', subsample=(1,1))(x)
    # 96, 96, 3
    x = BN(mode=2, axis=3)(x)
    return x