Python keras.backend module: get_session() code examples

The code examples below were extracted from open-source Python projects and illustrate how to use keras.backend.get_session().
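In TF1-era Keras, K.get_session() returns the TensorFlow session that Keras manages internally, which lets you run or evaluate graph tensors directly against values fed to a model's inputs. A minimal sketch of the idea (the model and data are illustrative, assuming the TensorFlow backend):

import numpy as np
from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

inp = Input(shape=(4,))
model = Model(inp, Dense(2)(inp))

sess = K.get_session()  # the session Keras created for its graph
x = np.ones((3, 4), dtype=np.float32)
out = sess.run(model.output, feed_dict={model.input: x})  # equivalent to model.predict(x) here
print(out.shape)  # (3, 2)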

Project: yoctol-keras-layer-zoo    Author: Yoctol
def test_seq_data_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.seq_data}
        )
        self.assertTrue(
            np.all(
                mask[:, :self.seq_data_max_length]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, self.seq_data_max_length:]
            )
        )
Project: cloudml-samples    Author: GoogleCloudPlatform
def to_savedmodel(model, export_path):
  """Convert the Keras HDF5 model into TensorFlow SavedModel."""

  builder = saved_model_builder.SavedModelBuilder(export_path)

  signature = predict_signature_def(inputs={'input': model.inputs[0]},
                                    outputs={'income': model.outputs[0]})

  with K.get_session() as sess:
    builder.add_meta_graph_and_variables(
        sess=sess,
        tags=[tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
    )
    builder.save()
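A hedged usage sketch (the checkpoint path is hypothetical): load a trained HDF5 model and export it into a numbered version directory, the layout TensorFlow Serving expects. Note that "with K.get_session() as sess:" closes Keras's session when the block exits, so this should be the last Keras call in the process.

from keras.models import load_model

model = load_model('census_model.h5')  # hypothetical checkpoint produced by model.save()
to_savedmodel(model, export_path='export/1')  # '1' is the model version directory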
Project: neuroblast    Author: ActiveState
def ExportModel(self):
        import keras.backend as K
        from tensorflow.python.saved_model import builder as saved_model_builder
        from tensorflow.python.saved_model import utils
        from tensorflow.python.saved_model import tag_constants, signature_constants
        from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
        from tensorflow.contrib.session_bundle import exporter

        print ("EXPORTING MODEL...")

        export_path = 'exported_brain'
        builder = saved_model_builder.SavedModelBuilder(export_path)

        signature = predict_signature_def(inputs={'inputs': self.brain.keras.input},
                                    outputs={'outputs': self.brain.keras.output})

        with K.get_session() as sess:
            builder.add_meta_graph_and_variables(sess=sess,
                                            tags=[tag_constants.TRAINING],
                                            signature_def_map={'predict': signature})
            builder.save()

        print ("...done!")
Project: sample-cnn    Author: tae-jun
def predict_tfrecord(self, x_batch):
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
      ins = [0.]
    else:
      ins = []
    self._make_tfrecord_predict_function()

    try:
      sess = K.get_session()
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)

      outputs = self.predict_function(ins)

    finally:
      # TODO: If you close the queue, you can't open it again..
      # if stop_queue_runners:
      #   coord.request_stop()
      #   coord.join(threads)
      pass

    if len(outputs) == 1:
      return outputs[0]
    return outputs
Project: deep-learning-essentials    Author: DominicBreuker
def convert_weights_theano2tensorflow(model_builder,
                                      theano_weights_file,
                                      tensorflow_weights_file):
    """
    Theano and Tensorflow implement convolutional layers differently.
    This function transforms pretrained weights for a Theano-based CNN
    to Tensorflow format.
    check out https://github.com/fchollet/keras/wiki/Converting-convolution-kernels-from-Theano-to-TensorFlow-and-vice-versa
    """
    assert K._BACKEND == 'tensorflow'
    model = model_builder(theano_weights_file)
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D',
                                        'Convolution2D',
                                        'Convolution3D',
                                        'AtrousConvolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)

    K.get_session().run(ops)
    model.save_weights(tensorflow_weights_file)
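The conversion itself amounts to flipping each kernel's spatial axes: Theano's conv2d performs true convolution (kernel flipped) while TensorFlow performs cross-correlation. A hedged numpy sketch of that flip for a kernel stored Theano-style as (nb_filter, stack_size, rows, cols):

import numpy as np

def flip_conv2d_kernel(w):
    # reverse the two spatial axes, which is what convert_kernel boils down to for 2D
    return w[:, :, ::-1, ::-1]

w = np.arange(2 * 3 * 3 * 3, dtype='float32').reshape(2, 3, 3, 3)
assert np.array_equal(flip_conv2d_kernel(flip_conv2d_kernel(w)), w)  # flipping twice is the identity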
Project: neural_style    Author: metaflow-ai
def export_model(model, absolute_model_dir, best_weights=None, saver=None, global_step=None):
    if not os.path.isdir(absolute_model_dir): 
        os.makedirs(absolute_model_dir)

    model.save_weights(absolute_model_dir + "/last_weights.hdf5", overwrite=True)
    if K._BACKEND == 'tensorflow' and saver is not None:
        sess = K.get_session()
        saver.save(sess, absolute_model_dir + '/tf-last_weights', global_step=global_step)

    if best_weights is not None:
        model.set_weights(best_weights)
        model.save_weights(absolute_model_dir + "/best_weights.hdf5", overwrite=True)
        if K._BACKEND == 'tensorflow' and saver is not None:
            saver.save(sess, absolute_model_dir + '/tf-best_weights', global_step=global_step)

    # Graph
    json = model.to_json()
    with open(absolute_model_dir + "/archi.json", 'w') as f:
        f.write(json)
    if K._BACKEND == 'tensorflow' and saver is not None and global_step is None:
        graph_def = sess.graph.as_graph_def()
        tf.train.write_graph(graph_def, absolute_model_dir, 'tf-model_graph')

        freeze_graph(model, absolute_model_dir, best_weights)
Project: keras-inception-resnet-v2    Author: myutwo150
def evaluate(imagenet_dir, batch_size=100, steps=None, num_threads=4, verbose=False):
    with K.get_session().as_default():
        # setup data tensors
        images, labels, num_samples = prepare_data(imagenet_dir, batch_size, num_threads)
        tf.train.start_queue_runners(coord=tf.train.Coordinator())

        # compile model in order to provide `metrics` and `target_tensors`
        model = InceptionResNetV2(input_tensor=images)
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['sparse_categorical_accuracy', 'sparse_top_k_categorical_accuracy'],
                      target_tensors=[labels])

        # start evaluation
        if steps is None:
            steps = int(math.ceil(num_samples / batch_size))
        _, acc1, acc5 = model.evaluate(x=None, y=None, steps=steps, verbose=int(verbose))
        print()
        print('Top-1 Accuracy {:.1%}'.format(acc1))
        print('Top-5 Accuracy {:.1%}'.format(acc5))
Project: DeepJet    Author: mstoye
def saveModel(self, outfile):
        self.keras_model.save(self.outputDir + outfile)
        import tensorflow as tf
        import keras.backend as K
        tfsession = K.get_session()
        saver = tf.train.Saver()
        tfoutpath = self.outputDir + outfile + '_tfsession/tf'
        import os
        os.system('rm -rf ' + tfoutpath)
        os.system('mkdir -p ' + tfoutpath)
        saver.save(tfsession, tfoutpath)


        #import h5py
        #f = h5py.File(self.outputDir+outfile, 'r+')
        #del f['optimizer_weights']
        #f.close()
Project: DIL    Author: FoxRow
def yolo_eval(yolo_outputs, image_shape, max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
Project: tanda    Author: HazyResearch
def __init__(self,
                 tan,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rescale=None,
                 preprocessing_function=None,
                 data_format=None):
        super(TANDAImageDataGenerator, self).__init__(
            featurewise_center=featurewise_center,
            samplewise_center=samplewise_center,
            featurewise_std_normalization=featurewise_std_normalization,
            samplewise_std_normalization=samplewise_std_normalization,
            zca_whitening=zca_whitening,
            zca_epsilon=zca_epsilon,
            rescale=rescale,
            preprocessing_function=preprocessing_function,
            data_format=data_format
        )
        self.tan = tan
        self.session = K.get_session()
Project: yoctol-keras-layer-zoo    Author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertFalse(np.any(mask[:, self.mask_start_point:]))
        self.assertTrue(np.all(mask[:, :self.mask_start_point]))
Project: yoctol-keras-layer-zoo    Author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertTrue(np.all(mask[:, :]))
Project: yoctol-keras-layer-zoo    Author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertFalse(
            np.any(mask[:, self.max_length:])
        )
        self.assertTrue(
            np.all(mask[:, :self.max_length])
        )
Project: yoctol-keras-layer-zoo    Author: Yoctol
def test_mask_value(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertFalse(np.any(mask))
Project: yoctol-keras-layer-zoo    Author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertTrue(
            np.all(mask)
        )
Project: keras-image-captioning    Author: danieljl
def vectorize_words(self, words):
        vectors = []
        for word in words:
            vector = self._word_vector_of.get(word)
            vectors.append(vector)

        num_unknowns = sum(1 for v in vectors if v is None)  # len(filter(...)) would fail on Python 3
        inits = self._initializer(shape=(num_unknowns, self._embedding_size))
        inits = K.get_session().run(inits)
        inits = iter(inits)
        for i in range(len(vectors)):
            if vectors[i] is None:
                vectors[i] = next(inits)

        return np.array(vectors)
Project: textfool    Author: bogdan-kulynych
def wordwise_grads(self, feature_vectors):
        sess = K.get_session()
        grad_sum = sess.run(self.grad_sum_tensor, feed_dict={
            self.input_tensor: feature_vectors,
            keras.backend.learning_phase(): 0
        })
        return grad_sum
Project: spark-deep-learning    Author: databricks
def __enter__(self):
        self.old_session = K.get_session()
        self.g = self.requested_graph or tf.Graph()
        self.current_session = tf.Session(graph=self.g)
        K.set_session(self.current_session)
        return (self.current_session, self.g)
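For this context manager to be safe, the matching __exit__ must close the temporary session and hand the previous one back to Keras. A plausible counterpart, sketched here rather than taken from the project source:

def __exit__(self, exc_type, exc_val, exc_tb):
    # restore whatever session Keras was using before we swapped it out
    self.current_session.close()
    K.set_session(self.old_session)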
Project: spark-deep-learning    Author: databricks
def __init__(self, graph=None, using_keras=False):
        self.graph = graph or tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        if using_keras:
            self.using_keras = True
            self.keras_prev_sess = K.get_session()
        else:
            self.using_keras = False
            self.keras_prev_sess = None
Project: keras_experiments    Author: avolkov1
def start(self):
        # import tensorflow as tf
        # self._sess = tf.get_default_session()
        self._sess = KB.get_session()
        super(ShareSessionThread, self).start()
Project: keras_experiments    Author: avolkov1
def _run_initsync(self):
        # tparams = [list(chain(*tp)) for tp in self._tower_params]
        tparams = self._tower_params

        # Check to prevent from unnecessarily re-initializing and
        # synchronizing, i.e. when the model loads the weights.
        for v in chain.from_iterable(tparams):
            if getattr(v, '_keras_initialized', False):
                return

        KB.manual_variable_initialization(True)
        sess = KB.get_session()
        KB.manual_variable_initialization(False)

        # glob_variables = tf.global_variables()
        # sess.run(tf.variables_initializer(glob_variables))

        # Initialize on GPU0 and sync to other GPUs
        init_op = tf.variables_initializer(tparams[0])
        # init_op = tf.variables_initializer(self._tower_params[0])
        # init_op = tf.variables_initializer(self.trainable_weights)
        sess.run(init_op)

        # Important if using model_creator. Not necessary if the model instance is
        # reused, in which case the model layers are shared between slices
        # and are automatically sync'd.
        sync_op = all_sync_params(tparams, self._gdev_list,
                                  usenccl=self._usenccl)
        sess.run(sync_op)

        for v in chain.from_iterable(tparams):
            v._keras_initialized = True


# Data-parallel ref: https://github.com/fchollet/keras/issues/2436
# Tower-parallel:
# ref: https://medium.com/autonomous-agents/multi-gpu-training-of-large-sparse-matrix-on-wide-neuralnetwork-cac7afc52ffe @IgnorePep8
# ref: https://gist.github.com/vvpreetham/1379cc4e208ea33ce3e615067e92fc5e
Project: nuts-ml    Author: maet3608
def evaluate(self, metrics, predcol=None, targetcol=-1):
        from keras import backend as K

        def compute(metric, targets, preds):
            result = metric(K.variable(targets), K.variable(preds))
            is_theano = K.backend() == 'theano'
            sess = None if is_theano else K.get_session()
            result = result.eval() if is_theano else result.eval(session=sess)
            is_vector = hasattr(result, '__iter__')
            return float(np.mean(result) if is_vector else result)

        return EvalNut(self, metrics, compute, predcol, targetcol)
Project: NetworkCompress    Author: luzai
def reset_weights(model):
    session = K.get_session()
    for layer in model.layers:
        if isinstance(layer, Dense):
            old = layer.get_weights()
            layer.W.initializer.run(session=session)
            layer.b.initializer.run(session=session)
            print(np.array_equal(old, layer.get_weights()), " after initializer run")
        else:
            print(layer, "not reinitialized")
Project: weightnorm    Author: openai
def data_based_init(model, input):

    # input can be dict, numpy array, or list of numpy arrays
    if type(input) is dict:
        feed_dict = input
    elif type(input) is list:
        feed_dict = {tf_inp: np_inp for tf_inp,np_inp in zip(model.inputs,input)}
    else:
        feed_dict = {model.inputs[0]: input}

    # add learning phase if required
    if model.uses_learning_phase and K.learning_phase() not in feed_dict:
        feed_dict.update({K.learning_phase(): 1})

    # get all layer name, output, weight, bias tuples
    layer_output_weight_bias = []
    for l in model.layers:
        if hasattr(l, 'W') and hasattr(l, 'b'):
            assert(l.built)
            layer_output_weight_bias.append( (l.name,l.get_output_at(0),l.W,l.b) ) # if more than one node, only use the first

    # iterate over our list and do data dependent init
    sess = K.get_session()
    for l,o,W,b in layer_output_weight_bias:
        print('Performing data dependent initialization for layer ' + l)
        m,v = tf.nn.moments(o, [i for i in range(len(o.get_shape())-1)])
        s = tf.sqrt(v + 1e-10)
        updates = tf.group(W.assign(W/tf.reshape(s,[1]*(len(W.get_shape())-1)+[-1])), b.assign((b-m)/s))
        sess.run(updates, feed_dict)
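A hedged usage sketch, assuming the Keras 1.x API this function was written against (the model and batch are illustrative): run the initializer once on a representative batch before training, since the rescaling of W and the shift of b are derived from that batch's activation statistics.

import numpy as np
from keras.layers import Dense, Input
from keras.models import Model

inp = Input(shape=(16,))
model = Model(inp, Dense(8, activation='relu')(inp))
model.compile(optimizer='sgd', loss='mse')

x_batch = np.random.rand(128, 16).astype('float32')  # one representative batch
data_based_init(model, x_batch)  # must run before fit(); it rewrites W and b in place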
Project: keras_text_classifier    Author: cdj0311
def th2tf( model):
    import tensorflow as tf
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    K.get_session().run(ops)
    return model
Project: deform-conv    Author: felixlaumon
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        total_loss = self.model.total_loss
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    # dense_1/bias:0 > dense_1/bias_0
                    name = weight.name.replace(':', '_')
                    tf.summary.histogram(name, weight)
                    tf.summary.histogram(
                        '{}_gradients'.format(name),
                        K.gradients(total_loss, [weight])[0]
                    )
                    if self.write_images:
                        w_img = tf.squeeze(weight)
                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)
                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)
                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
                        tf.summary.image(name, w_img)

                if hasattr(layer, 'output'):
                    tf.summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf.summary.merge_all()

        if self.write_graph:
            self.writer = tf.summary.FileWriter(self.log_dir,
                                                self.sess.graph)
        else:
            self.writer = tf.summary.FileWriter(self.log_dir)
Project: deform-conv    Author: felixlaumon
def keras_set_tf_debug():
    sess = K.get_session()
    sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
    K.set_session(sess)
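A hedged usage sketch (the model and data are illustrative): call the wrapper once before training, after which every session.run that Keras issues drops into the tfdbg command line, where "run -f has_inf_or_nan" stops at the first tensor containing an inf or NaN.

import numpy as np
from keras.layers import Dense, Input
from keras.models import Model

keras_set_tf_debug()  # wrap the live session before any fit/predict call

inp = Input(shape=(4,))
model = Model(inp, Dense(1)(inp))
model.compile(optimizer='sgd', loss='mse')
# each underlying session.run() now opens the tfdbg prompt
model.fit(np.ones((8, 4)), np.ones((8, 1)), epochs=1)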
Project: captcha-breaker    Author: Detry322
def reset_weights(model):
    session = K.get_session()
    for layer in model.layers:
        if isinstance(layer, Dense):
            old = layer.get_weights()
            layer.W.initializer.run(session=session)
            layer.b.initializer.run(session=session)
            print(np.array_equal(old, layer.get_weights()), " after initializer run")
        else:
            print(layer, "not reinitialized")
Project: YAD2K    Author: allanzelener
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
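The hack flagged by the TODO is that max_boxes is wrapped in a K.variable that Keras never initializes, so it must be initialized by hand through K.get_session(). Since tf.image.non_max_suppression also accepts a plain Python int for max_output_size, a hedged simplification is to drop the variable and the manual initializer run, so the end of yolo_eval could read:

    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_output_size=max_boxes, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes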
Project: neural_style    Author: metaflow-ai
def test_convolution_transpose_th(self):
        if K._BACKEND != 'tensorflow':
            return True
        K.set_image_dim_ordering('th')

        border_mode = 'valid'
        batch = 1
        height = 10
        width = 10
        channels_in = 1
        channels_out = 2
        kernel_size = 3
        rate = 2
        input_shape = (channels_in, height, width)

        input = Input(shape=input_shape, dtype=K.floatx())
        conv_layer = ATrousConvolution2D(channels_out, kernel_size, kernel_size, 
                rate, dim_ordering=K.image_dim_ordering(), init='one', 
                border_mode=border_mode, activation='linear')
        output = conv_layer(input)
        model = Model(input=[input], output=[output])
        model.compile(loss='mean_squared_error', optimizer='sgd')

        x = np.ones((batch,) + input_shape).astype(K.floatx())
        kernel = conv_layer.W
        output_model = model.predict(x)
        if K._BACKEND == 'tensorflow':
            x = tf.transpose(x, (0, 2, 3, 1))
            kernel = tf.transpose(kernel, (2, 3, 1, 0))

            y = tf.nn.atrous_conv2d(x, kernel, rate, padding=border_mode.upper())

            y = tf.transpose(y, (0, 3, 1, 2))
            output = y.eval(session=K.get_session())

        self.assertEqual(output_model.shape, (1, 2, 6, 6))    
        self.assertEqual(output.shape, (1, 2, 6, 6))
        self.assertEqual(True, (output==output_model).all())
Project: neural_style    Author: metaflow-ai
def test_convolution_transpose_tf(self):
        if K._BACKEND != 'tensorflow':
            return True
        K.set_image_dim_ordering('tf')

        border_mode = 'valid'
        batch = 1
        height = 10
        width = 10
        channels_in = 1
        channels_out = 2
        kernel_size = 3
        # effective kernel size: kernel_size + (kernel_size - 1) * (rate - 1) = 3 + 2 * 1 = 5,
        # so a 'valid' 10x10 input yields 10 - 5 + 1 = 6 per spatial dimension
        rate = 2
        input_shape = (height, width, channels_in)

        input = Input(shape=input_shape, dtype=K.floatx())
        conv_layer = ATrousConvolution2D(channels_out, kernel_size, kernel_size, 
                rate, dim_ordering=K.image_dim_ordering(), init='one', 
                border_mode=border_mode, activation='linear')
        output = conv_layer(input)
        model = Model(input=[input], output=[output])
        model.compile(loss='mean_squared_error', optimizer='sgd')

        x = np.ones((batch,) + input_shape).astype(K.floatx())
        kernel = conv_layer.W
        output_model = model.predict(x)
        if K._BACKEND == 'tensorflow':
            y = tf.nn.atrous_conv2d(x, kernel, rate, padding=border_mode.upper())
            output = y.eval(session=K.get_session())

        self.assertEqual(output_model.shape, (1, 6, 6, 2))    
        self.assertEqual(output.shape, (1, 6, 6, 2))
        self.assertEqual(True, (output==output_model).all())
Project: neural_style    Author: metaflow-ai
def test_convolution_transpose_tf_sameborder(self):
        if K._BACKEND != 'tensorflow':
            return True
        K.set_image_dim_ordering('tf')

        border_mode = 'same'
        batch = 1
        height = 10
        width = 10
        channels_in = 1
        channels_out = 2
        kernel_size = 3
        # effective kernel size: kernel_size + (kernel_size - 1) * (rate - 1) = 3 + 2 * 1 = 5;
        # with 'same' padding the 10x10 spatial shape is preserved
        rate = 2
        input_shape = (height, width, channels_in)

        input = Input(shape=input_shape, dtype=K.floatx())
        conv_layer = ATrousConvolution2D(channels_out, kernel_size, kernel_size, 
                rate, dim_ordering=K.image_dim_ordering(), init='one', 
                border_mode=border_mode, activation='linear')
        output = conv_layer(input)
        model = Model(input=[input], output=[output])
        model.compile(loss='mean_squared_error', optimizer='sgd')

        x = np.ones((batch,) + input_shape).astype(K.floatx())
        kernel = conv_layer.W
        output_model = model.predict(x)
        if K._BACKEND == 'tensorflow':
            y = tf.nn.atrous_conv2d(x, kernel, rate, padding=border_mode.upper())
            output = y.eval(session=K.get_session())

        self.assertEqual(output_model.shape, (1, 10, 10, 2))    
        self.assertEqual(output.shape, (1, 10, 10, 2))
        self.assertEqual(True, (output==output_model).all())
Project: unblackboxing_webinar    Author: deepsense-ai
def __init__(self, input_shape, classes, model_save_filepath):        
        self.model_save_filepath = model_save_filepath  

        self.neptune_organizer = None

        self.old_session = K.get_session()
        session = tf.Session('')
        K.set_session(session)
        K.set_learning_phase(1)

        face_input = Input(batch_shape=(None,) + (input_shape))

        pretrained_model = VGG16(input_tensor=face_input, 
                                 weights='imagenet', 
                                 include_top=False)
        x = pretrained_model.get_layer('block4_pool').output

        x = Flatten(name='flatten')(x)
        x = Dense(256, activation='relu', name='fc1')(x)
        x = Dense(256, activation='relu', name='fc2')(x)
        output = Dense(classes, activation='softmax', name='predictions')(x)

        self.facenet = Model(face_input, output)
        self.facenet.compile(optimizer='adam',
                             loss='categorical_crossentropy',
                             metrics=['accuracy'])
        self.facenet.summary()

        self.datagen = ImageDataGenerator(rotation_range=5,
                                          horizontal_flip=False, 
                                          vertical_flip=True)
Project: mlbootcamp_5    Author: ivan-filonov
def keras_limit_mem():
    from keras import backend as K
    K.get_session().close()
    cfg = K.tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    K.set_session(K.tf.Session(config=cfg))  # @tf_force_cpu
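By default the TensorFlow backend reserves all GPU memory up front; allow_growth makes the replacement session allocate memory only as it is needed. Because the old session is closed, any graph built against it becomes unusable, so a hedged usage is to call this once at startup, before any model exists:

keras_limit_mem()  # first Keras-related call in the program
# ... build and train models afterwards; they live in the new session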