Python keras.layers.core 模块,Lambda() 实例源码

我们从 Python 开源项目中,提取了以下 50 个代码示例,用于说明如何使用 keras.layers.core.Lambda()。

项目:quora_duplicate    作者:ijinmao    | 项目源码 | 文件源码
def distance_layer(x1, x2):
    """Distance and angle of two inputs.

    Concatenates the element-wise absolute difference and the
    element-wise product of the two input tensors.
    """
    def _abs_diff(tensors):
        left, right = tensors
        return K.abs(left - right)

    def _elementwise_prod(tensors):
        left, right = tensors
        return left * right

    # Keras Lambda output_shape excludes the batch dimension.
    feature_dim = K.int_shape(x1)[-1]
    distance = Lambda(_abs_diff, output_shape=(feature_dim,))([x1, x2])
    multiply = Lambda(_elementwise_prod, output_shape=(feature_dim,))([x1, x2])
    return concatenate([distance, multiply])
项目:taxi    作者:xuguanggen    | 项目源码 | 文件源码
def build_mlp(n_con, n_emb, vocabs_size, n_dis, emb_size, cluster_size):
    """MLP over one continuous branch plus ``n_emb`` embedded discrete branches.

    The branches are concatenated, normalized, and passed through a softmax
    over ``cluster_size`` clusters, which is finally mapped to a 2-D point by
    ``caluate_point`` (defined elsewhere in the project).
    """
    hidden_size = 800

    # Continuous-feature branch: one dense projection to emb_size.
    con = Sequential()
    con.add(Dense(input_dim=n_con, output_dim=emb_size))

    # One embedding branch per discrete feature, flattened for merging.
    branches = [con]
    for idx in range(n_emb):
        branch = Sequential()
        branch.add(Embedding(input_dim=vocabs_size[idx],
                             output_dim=emb_size,
                             input_length=n_dis))
        branch.add(Flatten())
        branches.append(branch)

    model = Sequential()
    model.add(Merge(branches, mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
项目:deep-mil-for-whole-mammogram-classification    作者:wentaozhu    | 项目源码 | 文件源码
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
    """Cross-channel (local response) normalization from the original AlexNet,
    packaged as a Keras ``Lambda`` layer. Assumes channels-first 4D input.
    """
    def _lrn(X):
        # Layout: (batch, channels, rows, cols).
        _, ch, _, _ = X.shape
        half = n // 2
        sq = K.square(X)
        # Pad the channel axis: move it last, zero-pad by `half`, move it back.
        padded = K.spatial_2d_padding(K.permute_dimensions(sq, (0, 2, 3, 1)),
                                      (0, half))
        padded = K.permute_dimensions(padded, (0, 3, 1, 2))
        # Sum of squares over a sliding window of n neighboring channels.
        denom = k
        for offset in range(n):
            denom += alpha * padded[:, offset:offset + ch, :, :]
        return X / (denom ** beta)

    # Normalization preserves the input shape.
    return Lambda(_lrn, output_shape=lambda s: s, **kwargs)
项目:deep-mil-for-whole-mammogram-classification    作者:wentaozhu    | 项目源码 | 文件源码
def splittensor(axis=1, ratio_split=1, id_split=0,**kwargs):
    """Return a Lambda layer that keeps the ``id_split``-th of ``ratio_split``
    equal slices of a 4D tensor along ``axis``.

    Args:
        axis: axis (0-3) to split along.
        ratio_split: number of equal slices.
        id_split: index of the slice to keep.
        **kwargs: forwarded to the Lambda layer (e.g. ``name``).
    """
    def f(X):
        div = X.shape[axis] // ratio_split

        if axis == 0:
            output = X[id_split*div:(id_split+1)*div, :, :, :]
        elif axis == 1:
            output = X[:, id_split*div:(id_split+1)*div, :, :]
        elif axis == 2:
            output = X[:, :, id_split*div:(id_split+1)*div, :]
        elif axis == 3:
            # BUG FIX: original used '==' (a discarded comparison), which left
            # `output` unassigned and raised NameError whenever axis == 3.
            output = X[:, :, :, id_split*div:(id_split+1)*div]
        else:
            raise ValueError("This axis is not possible")

        return output

    def g(input_shape):
        # The split axis shrinks by the split ratio; other dims are unchanged.
        output_shape = list(input_shape)
        output_shape[axis] = output_shape[axis] // ratio_split
        return tuple(output_shape)

    return Lambda(f, output_shape=lambda input_shape: g(input_shape), **kwargs)
项目:deep-mil-for-whole-mammogram-classification    作者:wentaozhu    | 项目源码 | 文件源码
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
    """Cross-channel (local response) normalization from the original AlexNet,
    packaged as a Keras ``Lambda`` layer. Assumes channels-first 4D input.
    """
    def _lrn(X):
        # Layout: (batch, channels, rows, cols).
        _, ch, _, _ = X.shape
        half = n // 2
        sq = K.square(X)
        # Pad the channel axis: move it last, zero-pad by `half`, move it back.
        padded = K.spatial_2d_padding(K.permute_dimensions(sq, (0, 2, 3, 1)),
                                      (0, half))
        padded = K.permute_dimensions(padded, (0, 3, 1, 2))
        # Sum of squares over a sliding window of n neighboring channels.
        denom = k
        for offset in range(n):
            denom += alpha * padded[:, offset:offset + ch, :, :]
        return X / (denom ** beta)

    # Normalization preserves the input shape.
    return Lambda(_lrn, output_shape=lambda s: s, **kwargs)
项目:deep-mil-for-whole-mammogram-classification    作者:wentaozhu    | 项目源码 | 文件源码
def splittensor(axis=1, ratio_split=1, id_split=0,**kwargs):
    """Return a Lambda layer that keeps the ``id_split``-th of ``ratio_split``
    equal slices of a 4D tensor along ``axis``.

    Args:
        axis: axis (0-3) to split along.
        ratio_split: number of equal slices.
        id_split: index of the slice to keep.
        **kwargs: forwarded to the Lambda layer (e.g. ``name``).
    """
    def f(X):
        div = X.shape[axis] // ratio_split

        if axis == 0:
            output = X[id_split*div:(id_split+1)*div, :, :, :]
        elif axis == 1:
            output = X[:, id_split*div:(id_split+1)*div, :, :]
        elif axis == 2:
            output = X[:, :, id_split*div:(id_split+1)*div, :]
        elif axis == 3:
            # BUG FIX: original used '==' (a discarded comparison), which left
            # `output` unassigned and raised NameError whenever axis == 3.
            output = X[:, :, :, id_split*div:(id_split+1)*div]
        else:
            raise ValueError("This axis is not possible")

        return output

    def g(input_shape):
        # The split axis shrinks by the split ratio; other dims are unchanged.
        output_shape = list(input_shape)
        output_shape[axis] = output_shape[axis] // ratio_split
        return tuple(output_shape)

    return Lambda(f, output_shape=lambda input_shape: g(input_shape), **kwargs)
项目:quora_duplicate    作者:ijinmao    | 项目源码 | 文件源码
def __call__(self, x1, x2):
        """Return the concatenation of |x1 - x2| and x1 * x2, each first
        transformed by ``self.model``.

        Args:
            x1, x2: Keras tensors of shape
                (batch, self.sequence_length, self.input_dim).
        Returns:
            Keras tensor: concat of the two transformed feature tensors.
        """
        def _sub_ops(args):
            # Element-wise absolute difference of the two inputs.
            x1 = args[0]
            x2 = args[1]
            x = K.abs(x1 - x2)
            return x

        def _mult_ops(args):
            # Element-wise product of the two inputs.
            x1 = args[0]
            x2 = args[1]
            return x1 * x2

        # Lambda output_shape excludes the batch dimension.
        output_shape = (self.sequence_length, self.input_dim,)
        sub = Lambda(_sub_ops, output_shape=output_shape)([x1, x2])
        mult = Lambda(_mult_ops, output_shape=output_shape)([x1, x2])
        sub = self.model(sub)
        mult = self.model(mult)
        return concatenate([sub, mult])
项目:nli_generation    作者:jstarc    | 项目源码 | 文件源码
def adverse_model(discriminator):
    """Build an adversarial model that scores a (train, hypo) sequence pair
    with log D(train) + log(1 - D(hypo)) using a shared discriminator.

    Args:
        discriminator: Keras model mapping an int32 token sequence to a
            probability-like score in (0, 1).
    Returns:
        Compiled Keras Model over [train_input, hypo_input].
    """
    # Variable-length integer token sequences.
    train_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    def margin_opt(inputs):
        # GAN-style margin: log D(real) + log(1 - D(generated)).
        assert len(inputs) == 2, ('Margin Output needs '
                              '2 inputs, %d given' % len(inputs))
        return K.log(inputs[0]) + K.log(1-inputs[1])

    margin = Lambda(margin_opt, output_shape=(lambda s : (None, 1)))\
               ([discriminator(train_input), discriminator(hypo_input)])
    adverserial = Model([train_input, hypo_input], margin)

    # NOTE(review): `minimize` is a loss defined elsewhere in this module —
    # presumably it minimizes the margin output directly; confirm.
    adverserial.compile(loss=minimize, optimizer='adam')
    return adverserial
项目:eva-didi    作者:eljefec    | 项目源码 | 文件源码
def build_model(dropout):
    """Convolutional regression network with a single scalar output.

    Args:
        dropout: dropout rate applied before each dense layer.
    Returns:
        Uncompiled Keras Sequential model over images of INPUT_SHAPE.
    """
    model = Sequential()
    # Normalize pixels from [0, 255] to [-0.5, 0.5].
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=INPUT_SHAPE))
    model.add(Conv2D(3, (1, 1), activation='relu'))
    model.add(Conv2D(12, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Two 3x3 conv + pool stages with growing depth.
    for depth in (16, 24):
        model.add(Conv2D(depth, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(48, (3, 3), activation='relu'))
    model.add(Flatten())
    # Dense head with dropout before every layer.
    for width in (64, 32):
        model.add(Dropout(dropout))
        model.add(Dense(width, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))

    return model
项目:rna_protein_binding    作者:wentaozhu    | 项目源码 | 文件源码
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
    """Cross-channel (local response) normalization from the original AlexNet,
    packaged as a Keras ``Lambda`` layer. Assumes channels-first 4D input.
    """
    def _lrn(X):
        # Layout: (batch, channels, rows, cols).
        _, ch, _, _ = X.shape
        half = n // 2
        sq = K.square(X)
        # Pad the channel axis: move it last, zero-pad by `half`, move it back.
        padded = K.spatial_2d_padding(K.permute_dimensions(sq, (0, 2, 3, 1)),
                                      (0, half))
        padded = K.permute_dimensions(padded, (0, 3, 1, 2))
        # Sum of squares over a sliding window of n neighboring channels.
        denom = k
        for offset in range(n):
            denom += alpha * padded[:, offset:offset + ch, :, :]
        return X / (denom ** beta)

    # Normalization preserves the input shape.
    return Lambda(_lrn, output_shape=lambda s: s, **kwargs)
项目:rna_protein_binding    作者:wentaozhu    | 项目源码 | 文件源码
def splittensor(axis=1, ratio_split=1, id_split=0,**kwargs):
    """Return a Lambda layer that keeps the ``id_split``-th of ``ratio_split``
    equal slices of a 4D tensor along ``axis``.

    Args:
        axis: axis (0-3) to split along.
        ratio_split: number of equal slices.
        id_split: index of the slice to keep.
        **kwargs: forwarded to the Lambda layer (e.g. ``name``).
    """
    def f(X):
        div = X.shape[axis] // ratio_split

        if axis == 0:
            output = X[id_split*div:(id_split+1)*div, :, :, :]
        elif axis == 1:
            output = X[:, id_split*div:(id_split+1)*div, :, :]
        elif axis == 2:
            output = X[:, :, id_split*div:(id_split+1)*div, :]
        elif axis == 3:
            # BUG FIX: original used '==' (a discarded comparison), which left
            # `output` unassigned and raised NameError whenever axis == 3.
            output = X[:, :, :, id_split*div:(id_split+1)*div]
        else:
            raise ValueError("This axis is not possible")

        return output

    def g(input_shape):
        # The split axis shrinks by the split ratio; other dims are unchanged.
        output_shape = list(input_shape)
        output_shape[axis] = output_shape[axis] // ratio_split
        return tuple(output_shape)

    return Lambda(f, output_shape=lambda input_shape: g(input_shape), **kwargs)
项目:eXposeDeepNeuralNetwork    作者:joshsaxe    | 项目源码 | 文件源码
def getconvmodel(filter_length, nb_filter):
    """1D convolution block over (100, 32) inputs, collapsed over time.

    Args:
        filter_length: convolution window length.
        nb_filter: number of convolution filters (also the output width).
    Returns:
        Keras Sequential producing an (nb_filter,)-shaped feature vector.
    """
    model = Sequential()
    model.add(Convolution1D(
        nb_filter=nb_filter,
        input_shape=(100, 32),
        filter_length=filter_length,
        border_mode='same',
        activation='relu',
        subsample_length=1,
    ))
    # Collapse the temporal axis via sum (sum_1d defined elsewhere).
    model.add(Lambda(sum_1d, output_shape=(nb_filter,)))
    model.add(Dropout(0.5))
    return model
项目:keras_zoo    作者:david-vazquez    | 项目源码 | 文件源码
def splittensor(axis=1, ratio_split=1, id_split=0, **kwargs):
    """Lambda layer keeping the ``id_split``-th of ``ratio_split`` equal
    slices of a 4D tensor along ``axis``.
    """
    def _slice(X):
        div = K.shape(X)[axis] // ratio_split
        lo = id_split * div
        hi = (id_split + 1) * div
        if axis == 0:
            return X[lo:hi, :, :, :]
        if axis == 1:
            return X[:, lo:hi, :, :]
        if axis == 2:
            return X[:, :, lo:hi, :]
        if axis == 3:
            return X[:, :, :, lo:hi]
        raise ValueError("This axis is not possible")

    def _split_shape(input_shape):
        # Only the split axis shrinks.
        shape = list(input_shape)
        shape[axis] = shape[axis] // ratio_split
        return tuple(shape)

    return Lambda(_slice, output_shape=_split_shape, **kwargs)
项目:word2vec-keras-in-gensim    作者:niitsuma    | 项目源码 | 文件源码
def build_keras_model_sg(index_size,vector_size,
                         context_size,
                         #code_dim,
                         sub_batch_size=256,
                         learn_vectors=True,learn_hidden=True,
                         model=None):
    """Build the skip-gram word2vec training graph (legacy keras Graph API).

    Args:
        index_size: vocabulary size of the word-vector embedding.
        vector_size: embedding dimensionality.
        context_size: vocabulary size of the context (output) embedding.
        sub_batch_size: number of (word, context) pairs per sample.
        learn_vectors, learn_hidden: unused here — presumably reserved for
            freezing one of the embeddings; TODO confirm.
        model: gensim Word2Vec-like object supplying `syn0` and `keras_syn1`.
    Returns:
        Compiled Graph model with output 'code' trained with MSE.
    """
    kerasmodel = Graph()
    kerasmodel.add_input(name='point' , input_shape=(1,), dtype=int)
    kerasmodel.add_input(name='index' , input_shape=(1,), dtype=int)
    # Word and context embeddings initialized from the gensim weight matrices.
    kerasmodel.add_node(Embedding(index_size, vector_size, input_length=sub_batch_size,weights=[model.syn0]),name='embedding', input='index')
    kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size,weights=[model.keras_syn1]),name='embedpoint', input='point')
    # Dot product: element-wise multiply (merge_mode='mul'), then sum axis 2.
    kerasmodel.add_node(Lambda(lambda x:x.sum(2))   , name='merge',inputs=['embedding','embedpoint'], merge_mode='mul')
    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid', input='merge')
    kerasmodel.add_output(name='code',input='sigmoid')
    kerasmodel.compile('rmsprop', {'code':'mse'})
    return kerasmodel
项目:word2vec-keras-in-gensim    作者:niitsuma    | 项目源码 | 文件源码
def build_keras_model_cbow(index_size,vector_size,
                           context_size,
                           #code_dim,
                           sub_batch_size=1,
                           model=None,cbow_mean=False):
    """Build the CBOW word2vec training graph (legacy keras Graph API).

    Args:
        index_size: vocabulary size of the word-vector embedding.
        vector_size: embedding dimensionality.
        context_size: vocabulary size of the context (output) embedding.
        sub_batch_size: number of context points per sample.
        model: gensim Word2Vec-like object supplying `syn0` and `keras_syn1`.
        cbow_mean: average (True) or sum (False) the context embeddings.
    Returns:
        Compiled Graph model with output 'code' trained with MSE.
    """
    kerasmodel = Graph()
    kerasmodel.add_input(name='point' , input_shape=(sub_batch_size,), dtype='int')
    kerasmodel.add_input(name='index' , input_shape=(1,), dtype='int')
    kerasmodel.add_node(Embedding(index_size, vector_size, weights=[model.syn0]),name='embedding', input='index')
    kerasmodel.add_node(Embedding(context_size, vector_size, input_length=sub_batch_size,weights=[model.keras_syn1]),name='embedpoint', input='point')
    # Collapse the context axis by mean or sum, per the CBOW variant.
    if cbow_mean:
        kerasmodel.add_node(Lambda(lambda x:x.mean(1),output_shape=(vector_size,)),name='average',input='embedding')
    else:
        kerasmodel.add_node(Lambda(lambda x:x.sum(1),output_shape=(vector_size,)),name='average',input='embedding')

    # Dot-product merge with the context embedding, squashed by a sigmoid.
    kerasmodel.add_node(Activation('sigmoid'), name='sigmoid',inputs=['average','embedpoint'], merge_mode='dot',dot_axes=-1)
    kerasmodel.add_output(name='code',input='sigmoid')
    kerasmodel.compile('rmsprop', {'code':'mse'})
    return kerasmodel
项目:convnets-keras    作者:heuritech    | 项目源码 | 文件源码
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """Cross-channel (local response) normalization from the original AlexNet,
    packaged as a Keras ``Lambda`` layer. Assumes channels-first 4D input.
    """
    def _lrn(X):
        # Layout: (batch, channels, rows, cols).
        _, ch, _, _ = X.shape
        half = n // 2
        sq = K.square(X)
        # Pad the channel axis: move it last, zero-pad by `half`, move it back.
        padded = K.spatial_2d_padding(K.permute_dimensions(sq, (0, 2, 3, 1)),
                                      (0, half))
        padded = K.permute_dimensions(padded, (0, 3, 1, 2))
        # Sum of squares over a sliding window of n neighboring channels.
        denom = k
        for offset in range(n):
            denom += alpha * padded[:, offset:offset + ch, :, :]
        return X / (denom ** beta)

    # Normalization preserves the input shape.
    return Lambda(_lrn, output_shape=lambda s: s, **kwargs)
项目:convnets-keras    作者:heuritech    | 项目源码 | 文件源码
def splittensor(axis=1, ratio_split=1, id_split=0, **kwargs):
    """Lambda layer keeping the ``id_split``-th of ``ratio_split`` equal
    slices of a 4D tensor along ``axis``.
    """
    def _slice(X):
        div = X.shape[axis] // ratio_split
        lo = id_split * div
        hi = (id_split + 1) * div
        if axis == 0:
            return X[lo:hi, :, :, :]
        if axis == 1:
            return X[:, lo:hi, :, :]
        if axis == 2:
            return X[:, :, lo:hi, :]
        if axis == 3:
            return X[:, :, :, lo:hi]
        raise ValueError('This axis is not possible')

    def _split_shape(input_shape):
        # Only the split axis shrinks.
        shape = list(input_shape)
        shape[axis] = shape[axis] // ratio_split
        return tuple(shape)

    return Lambda(_slice, output_shape=_split_shape, **kwargs)
项目:keras-dogs    作者:ahangchen    | 项目源码 | 文件源码
def make_parallel(model, gpu_count):
    """Replicate `model` across `gpu_count` GPUs with batch-dimension data
    parallelism, concatenating the per-GPU outputs back on the CPU.

    Args:
        model: a built Keras Model.
        gpu_count: number of GPU towers to create.
    Returns:
        A new Model over `model.inputs` whose outputs are batch-concatenated
        tower outputs.
    """
    def get_slice(data, idx, parts):
        # Take the idx-th of `parts` contiguous batch slices of `data`.
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        # Stride is zero on all non-batch axes, so only the batch offset moves.
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    # One accumulator list per model output.
    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:

                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    # Static shape without the batch dimension.
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)

                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            # Concatenate tower results along the batch axis.
            merged.append(Concatenate(axis=0)(outputs))

        return Model(model.inputs, merged)
项目:BiMPM_keras    作者:ijinmao    | 项目源码 | 文件源码
def __init__(self, sequence_length, nb_chars, nb_per_word,
                 embedding_dim, rnn_dim, rnn_unit='gru', dropout=0.0):
        """Character-level word encoder: embeds each word's characters and
        runs an RNN per word, yielding one rnn_dim vector per word.

        Args:
            sequence_length: number of words per sequence.
            nb_chars: character vocabulary size.
            nb_per_word: characters per word.
            embedding_dim: character embedding size.
            rnn_dim: RNN hidden size (per-word output size).
            rnn_unit: 'gru' selects GRU; anything else selects LSTM.
            dropout: dropout and recurrent dropout rate for the RNN.
        """
        def _collapse_input(x, nb_per_word=0):
            # (batch, seq_len, nb_per_word) -> (batch*seq_len, nb_per_word)
            # so the RNN treats every word as an independent sample.
            x = K.reshape(x, (-1, nb_per_word))
            return x

        def _unroll_input(x, sequence_length=0, rnn_dim=0):
            # (batch*seq_len, rnn_dim) -> (batch, seq_len, rnn_dim)
            x = K.reshape(x, (-1, sequence_length, rnn_dim))
            return x

        if rnn_unit == 'gru':
            rnn = GRU
        else:
            rnn = LSTM
        self.model = Sequential()
        self.model.add(Lambda(_collapse_input,
                              arguments={'nb_per_word': nb_per_word},
                              output_shape=(nb_per_word,),
                              input_shape=(sequence_length, nb_per_word,)))
        self.model.add(Embedding(nb_chars,
                                 embedding_dim,
                                 input_length=nb_per_word,
                                 trainable=True))
        self.model.add(rnn(rnn_dim,
                           dropout=dropout,
                           recurrent_dropout=dropout))
        self.model.add(Lambda(_unroll_input,
                              arguments={'sequence_length': sequence_length,
                                         'rnn_dim': rnn_dim},
                              output_shape=(sequence_length, rnn_dim)))
项目:keras    作者:GeekLiB    | 项目源码 | 文件源码
def test_lambda():
    """Exercise core.Lambda: basic call, extra arguments, and (de)serialization
    with both a plain function and an output_shape function."""
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    # Simple element-wise function.
    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # Function taking extra keyword arguments via `arguments`.
    layer_test(Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    ld = Lambda(f)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})

    # Round-trip a lambda with a lambda output_shape through get_config.
    ld = Lambda(lambda x: K.concatenate([K.square(x), x]),
                output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})
项目:minos    作者:guybedo    | 项目源码 | 文件源码
def _build_multi_gpu_model(blueprint, devices):
    """Build a data-parallel model: a CPU-hosted base model replicated across
    the GPU devices, with batch slices dispatched to each replica and outputs
    concatenated on the CPU.

    Args:
        blueprint: model blueprint consumed by _build_single_device_model.
        devices: device name strings; only GPU devices are used for replicas.
    Returns:
        MultiGpuModel wrapping the base model and the merged output.
    """
    import tensorflow as tf
    model = _build_single_device_model(blueprint, cpu_device())
    gpu_devices = [d for d in devices if is_gpu_device(d)]
    gpu_count = len(gpu_devices)

    def get_input(data, idx, parts):
        # Take the idx-th of `parts` contiguous batch slices of `data`.
        shape = tf.shape(data)
        size = tf.concat([ shape[:1] // parts, shape[1:] ], 0)
        # Zero stride on non-batch axes: only the batch offset advances.
        stride = tf.concat([ shape[:1] // parts, shape[1:]*0 ], 0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs = []
    for i, device in enumerate(gpu_devices):
        with tf.device(device):
            # NOTE(review): only the first input is sliced — presumably the
            # blueprint models are single-input; confirm.
            x = model.inputs[0]
            input_shape = tuple(x.get_shape().as_list())[1:]
            model_input = Lambda(
                get_input, 
                output_shape=input_shape, 
                arguments={'idx':i,'parts':gpu_count})(x)
            outputs.append(model(model_input))
    with tf.device(cpu_device()):
        # Re-assemble the per-GPU outputs along the batch axis.
        output = merge(outputs, mode='concat', concat_axis=0)
        return MultiGpuModel(
            model, 
            model_input=model.inputs, 
            model_output=output)
项目:quora_duplicate    作者:ijinmao    | 项目源码 | 文件源码
def __init__(self, sequence_length, nb_chars, nb_per_word,
                 embedding_dim, rnn_dim, rnn_unit='gru', dropout=0.0):
        """Character-level word encoder: embeds each word's characters and
        runs an RNN per word, yielding one rnn_dim vector per word.

        Args:
            sequence_length: number of words per sequence.
            nb_chars: character vocabulary size.
            nb_per_word: characters per word.
            embedding_dim: character embedding size.
            rnn_dim: RNN hidden size (per-word output size).
            rnn_unit: 'gru' selects GRU; anything else selects LSTM.
            dropout: dropout and recurrent dropout rate for the RNN.
        """
        def _collapse_input(x, nb_per_word=0):
            # (batch, seq_len, nb_per_word) -> (batch*seq_len, nb_per_word)
            # so the RNN treats every word as an independent sample.
            x = K.reshape(x, (-1, nb_per_word))
            return x

        def _unroll_input(x, sequence_length=0, rnn_dim=0):
            # (batch*seq_len, rnn_dim) -> (batch, seq_len, rnn_dim)
            x = K.reshape(x, (-1, sequence_length, rnn_dim))
            return x

        if rnn_unit == 'gru':
            rnn = GRU
        else:
            rnn = LSTM
        self.model = Sequential()
        self.model.add(Lambda(_collapse_input,
                              arguments={'nb_per_word': nb_per_word},
                              output_shape=(nb_per_word,),
                              input_shape=(sequence_length, nb_per_word,)))
        self.model.add(Embedding(nb_chars,
                                 embedding_dim,
                                 input_length=nb_per_word,
                                 trainable=True))
        self.model.add(rnn(rnn_dim,
                           dropout=dropout,
                           recurrent_dropout=dropout))
        self.model.add(Lambda(_unroll_input,
                              arguments={'sequence_length': sequence_length,
                                         'rnn_dim': rnn_dim},
                              output_shape=(sequence_length, rnn_dim)))
项目:quora_duplicate    作者:ijinmao    | 项目源码 | 文件源码
def __call__(self, x1, x2):
        """Soft-align two sequences with bidirectional attention.

        Args:
            x1, x2: Keras tensors of shape
                (batch, self.sequence_length, self.input_dim).
        Returns:
            (att1, att2): x2 aligned to x1's timesteps, and x1 aligned to
            x2's timesteps, both shaped like the inputs.
        """
        def _dot_product(args):
            # Pairwise timestep similarity: x . y^T.
            x = args[0]
            y = args[1]
            return K.batch_dot(x, K.permute_dimensions(y, (0, 2, 1)))

        def _normalize(args, transpose=False):
            # Row-wise softmax over attention weights (max-subtracted for
            # numerical stability), then weighted sum of x.
            att_w = args[0]
            x = args[1]
            if transpose:
                att_w = K.permute_dimensions(att_w, (0, 2, 1))
            e = K.exp(att_w - K.max(att_w, axis=-1, keepdims=True))
            sum_e = K.sum(e, axis=-1, keepdims=True)
            nor_e = e / sum_e
            return K.batch_dot(nor_e, x)

        # (batch_size, timesteps1, dim)
        f1 = self.model(x1)
        # (batch_size, timesteps2, dim)
        f2 = self.model(x2)
        output_shape = (self.sequence_length, self.sequence_length,)
        # attention weights, (batch_size, timesteps1, timesteps2)
        att_w = Lambda(
            _dot_product,
            output_shape=output_shape)([f1, f2])
        output_shape = (self.sequence_length, self.input_dim,)
        # (batch_size, timesteps1, dim)
        att1 = Lambda(
            _normalize, arguments={'transpose': False},
            output_shape=output_shape)([att_w, x2])
        # (batch_size, timestep2, dim)
        att2 = Lambda(
            _normalize, arguments={'transpose': True},
            output_shape=output_shape)([att_w, x1])
        return att1, att2
项目:explainable-cnn    作者:blengerich    | 项目源码 | 文件源码
def get_simple_cnn(height, width):
    """ A simple CNN that has the same input/output shapes as the VGG16 model.

    Args:
        height: input height
        width: input width
    Return: Keras model

    """
    def _pad_conv(net, pool=True):
        # One zero-pad + 3x3 conv stage, optionally followed by max-pooling.
        net.add(ZeroPadding2D((1, 1)))
        net.add(Convolution2D(64, 3, 3, activation='relu'))
        if pool:
            net.add(MaxPooling2D((4, 4), strides=(2, 2)))

    model = Sequential()
    # First stage carries the input shape (channels-first 3xHxW).
    model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((4, 4), strides=(2, 2)))

    _pad_conv(model)
    _pad_conv(model, pool=False)

    # Global average pooling (helpers defined elsewhere) into a 2-way softmax.
    model.add(Lambda(global_average_pooling,
                     output_shape=global_average_pooling_shape))
    model.add(Dense(2, activation="softmax", init="uniform"))
    return model
项目:MovieTaster-Open    作者:lujiaying    | 项目源码 | 文件源码
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
    """CBOW-style model: average the context-word embeddings and predict the
    center word with a full softmax over the vocabulary.

    Args:
        dict_size: vocabulary size.
        emb_size: embedding dimensionality.
        context_window_size: number of context words per sample.
    Returns:
        Compiled Keras Sequential model.
    """
    model = keras.models.Sequential()
    model.add(Embedding(
        dict_size, emb_size,
        input_length=context_window_size,
        embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
    ))
    # Mean over the context axis yields one emb_size vector per sample.
    model.add(Lambda(lambda ctx: K.mean(ctx, axis=1), output_shape=(emb_size,)))
    model.add(Dense(dict_size))
    model.add(Activation('softmax'))  # TODO: use nce

    opt = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    return model
项目:nli_generation    作者:jstarc    | 项目源码 | 文件源码
def baseline_train(noise_examples, hidden_size, noise_dim, glove, hypo_len, version):
    """Build the baseline hypothesis-generation training model: premise LSTM +
    class label + per-example noise embedding feed a FeedLSTM decoder with a
    hierarchical softmax output.

    Args:
        noise_examples: number of training examples (rows of the noise table).
        hidden_size: premise LSTM hidden size.
        noise_dim: per-example noise embedding size.
        glove: embedding matrix used by make_fixed_embeddings.
        hypo_len: hypothesis length (decoder sees hypo_len + 1 tokens).
        version: integer appended to the model name.
    Returns:
        Compiled Keras Model over [prem, hypo, noise, train, class] inputs.
    """
    prem_input = Input(shape=(None,), dtype='int32', name='prem_input')
    hypo_input = Input(shape=(hypo_len + 1,), dtype='int32', name='hypo_input')
    noise_input = Input(shape=(1,), dtype='int32', name='noise_input')
    train_input = Input(shape=(None,), dtype='int32', name='train_input')
    class_input = Input(shape=(3,), name='class_input')
    # Size of [premise; class one-hot; noise] after concatenation.
    concat_dim = hidden_size + noise_dim + 3
    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, hypo_len + 1)(hypo_input)

    premise_layer = LSTM(output_dim=hidden_size, return_sequences=False,
                            inner_activation='sigmoid', name='premise')(prem_embeddings)

    # One learned noise vector per training example, looked up by example id.
    noise_layer = Embedding(noise_examples, noise_dim,
                            input_length = 1, name='noise_embeddings')(noise_input)
    flat_noise = Flatten(name='noise_flatten')(noise_layer)
    merged = merge([premise_layer, class_input, flat_noise], mode='concat')
    creative = Dense(concat_dim, name = 'cmerge')(merged)
    # Pass-through Lambda: forwards hypo_embeddings unchanged while making
    # `creative` part of the graph so FeedLSTM can consume it as feed_layer.
    fake_merge = Lambda(lambda x:x[0], output_shape=lambda x:x[0])([hypo_embeddings, creative])
    hypo_layer = FeedLSTM(output_dim=concat_dim, return_sequences=True,
                         feed_layer = creative, inner_activation='sigmoid', 
                         name='attention')([fake_merge])

    hs = HierarchicalSoftmax(len(glove), trainable = True, name='hs')([hypo_layer, train_input])
    inputs = [prem_input, hypo_input, noise_input, train_input, class_input]


    model_name = 'version' + str(version)
    model = Model(input=inputs, output=hs, name = model_name)
    model.compile(loss=hs_categorical_crossentropy, optimizer='adam')

    return model
项目:OCR-Handwritten-Text    作者:iitmcvg    | 项目源码 | 文件源码
def OCR():
    """Small VGG-style CNN for 62-way character classification over
    channels-first 3x32x32 images (preprocessed by vgg_preprocess)."""
    model = Sequential()
    model.add(Lambda(vgg_preprocess, input_shape=(3, 32, 32)))
    print ("in ocr")
    # Two identical conv blocks (helper defined elsewhere in the project).
    for _ in range(2):
        ConvBlock(2, model, 16)
    model.add(Flatten())
    model.add(Dense(192, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(62, activation='softmax'))
    print ("outside OCR")
    return model
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def buildDecomposition(self):
        """Decompose question/answer embeddings into positive/negative channels
        via the `decomposite` helper, storing the results in self.tensors."""
        q_embedding = self.tensors['q-embedding']
        a_embedding = self.tensors['a-embedding']
        q_match = self.tensors['q-match']
        a_match = self.tensors['a-match']
        # compute q+, q-, a+, a-
        # NOTE(review): the original comments here were garbled (mis-encoded
        # CJK text). Best reconstruction of their intent: a Keras Lambda layer
        # cannot declare an output_shape that includes the batch dimension,
        # but `decomposite` produces a batch-sized leading axis, so Merge —
        # whose output_shape may include batch_size — is used instead of
        # Lambda. Confirm against the original repository.
        q_channels = Merge(
            mode=lambda x: decomposite(*x),
            output_shape=(self.params['batch_size'], 2, self.q_length, self.wdim),
            name='q-channels'
        )([q_embedding, q_match])

        a_channels = Merge(
            mode=lambda x: decomposite(*x),
            output_shape=(self.params['batch_size'], 2, self.a_length, self.wdim),
            name='a-channels',
        )([a_embedding, a_match])
        print('q_channels', q_channels._keras_shape, K.ndim(q_channels))
        print('a_channels', a_channels._keras_shape, K.ndim(a_channels))
        self.tensors['q-channels'] = q_channels
        self.tensors['a-channels'] = a_channels
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def linkFeature(self, input_name, conv_name, activation='tanh'):
        """Apply each convolution registered under `conv_name` to the tensor
        named `input_name`, activate, global-max-pool over time, and merge
        the per-filter-size features.

        Args:
            input_name: key into self.tensors for the input tensor.
            conv_name: key into self.layers for the convolution list.
            activation: activation name (str) or an advanced-activation
                layer factory called with a `name` keyword only.
        Returns:
            Concatenated feature tensor (or the single feature if only one
            filter size is configured).
        """
        # BUG FIX: removed leftover debug statement print('Am I called').
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        convs = self.layers.get(conv_name)
        assert filters
        assert convs
        features = []
        for fsz, conv in zip(filters, convs):
            conv_output = conv(self.tensors[input_name])
            # A string selects a stock activation; otherwise `activation` is
            # an advanced-activation layer factory.
            if type(activation) == type(''):
                act = Activation(
                    activation, name='%s-act-%d' % (input_name, fsz)
                )(conv_output)
            else:
                act = activation(
                    name='%s-advanced-act-%d' % (input_name, fsz)
                )(conv_output)
            # Global max over the temporal axis of channel 0.
            maxpool = Lambda(
                lambda x: K.max(x[:,:,:,0], axis=2),
                output_shape=(nb_filter,),
                name='%s-maxpool-%d' % (input_name, fsz)
            )(act)
            features.append(maxpool)
        if len(features) > 1:
            return Merge(mode='concat', name='%s-feature' % input_name)(features)
        else:
            return features[0]
项目:knowledgeflow    作者:3rduncle    | 项目源码 | 文件源码
def doubleFeature(self, pos, neg, conv_name, activation='tanh'):
        """Shared-convolution feature over a (pos, neg) tensor pair: each
        convolution is applied to both tensors, summed, activated, and
        global-max-pooled; per-filter-size features are then merged.

        Args:
            pos, neg: keys into self.tensors for the two input tensors.
            conv_name: key into self.layers for the convolution list.
            activation: activation name (str) or an advanced-activation
                layer factory called with a `name` keyword only.
        Returns:
            Concatenated feature tensor (or the single feature if only one
            filter size is configured).
        """
        name = '%s+%s' % (pos, neg)
        filters = self.params['filters']
        nb_filter = self.params['nb_filter']
        convs = self.layers[conv_name]
        features = []
        pos = self.tensors[pos]
        neg = self.tensors[neg]
        for fsz, conv in zip(filters, convs):
            # Shared weights: the same conv layer is applied to both inputs.
            # (Renamed from `sum`, which shadowed the builtin.)
            summed = Merge(
                mode='sum',
            )([conv(pos), conv(neg)])
            if type(activation) == type(''):
                # BUG FIX: original referenced the undefined name
                # `input_names` ('+'.join(input_names)), raising NameError on
                # this path; use the precomputed `name` like the other branch.
                act = Activation(
                    activation, name='%s-act-%d' % (name, fsz)
                )(summed)
            else:
                act = activation(
                    name='%s-advanced-act-%d' % (name, fsz)
                )(summed)
            maxpool = Lambda(
                lambda x: K.max(x, axis=1),
                output_shape=(nb_filter,),
                name='%s-maxpool-%d' % (name, fsz)
            )(act)
            features.append(maxpool)
        if len(features) > 1:
            return Merge(
                mode='concat', 
                name='%s-feature' % name,
            )(features)
        else:
            return features[0]
项目:keras-customized    作者:ambrite    | 项目源码 | 文件源码
def test_lambda():
    """Exercise core.Lambda: basic call, extra arguments, and (de)serialization
    with both a plain function and an output_shape function."""
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    # Simple element-wise function.
    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # Function taking extra keyword arguments via `arguments`.
    layer_test(Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    ld = Lambda(f)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})

    # Round-trip a lambda with a lambda output_shape through get_config.
    ld = Lambda(lambda x: K.concatenate([K.square(x), x]),
                output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})
项目:pixelcnn_keras    作者:suga93    | 项目源码 | 文件源码
def __call__(self, xW, layer_idx):
        '''calculate gated activation maps given input maps

        Splits the 2*nb_filters feature maps into two halves and combines
        them as tanh(Wf) * sigmoid(Wg) (PixelCNN gated activation), after
        optionally cropping, merging the vertical-stack map, and adding a
        latent conditioning term.
        '''
        if self.stack_name == 'vertical':
            stack_tag = 'v'
        elif self.stack_name == 'horizontal':
            stack_tag = 'h'

        if self.crop_right:
            xW = Lambda(self._crop_right, name='h_crop_right_'+str(layer_idx))(xW)

        # Horizontal stack receives the vertical stack's map additively.
        if self.v_map is not None:
            xW = merge([xW, self.v_map], mode='sum', name='h_merge_v_'+str(layer_idx))

        # Conditioning vector h: project to 2*nb_filters and broadcast-add.
        if self.h is not None:
            hV = Dense(output_dim=2*self.nb_filters, name=stack_tag+'_dense_latent_'+str(layer_idx))(self.h)
            hV = Reshape((1, 1, 2*self.nb_filters), name=stack_tag+'_reshape_latent_'+str(layer_idx))(hV)
            #xW = merge([xW, hV], mode=lambda x: x[0]+x[1])
            xW = Lambda(lambda x: x[0]+x[1], name=stack_tag+'_merge_latent_'+str(layer_idx))([xW,hV])

        # First half feeds tanh, second half feeds the sigmoid gate.
        xW_f = Lambda(lambda x: x[:,:,:,:self.nb_filters], name=stack_tag+'_Wf_'+str(layer_idx))(xW)
        xW_g = Lambda(lambda x: x[:,:,:,self.nb_filters:], name=stack_tag+'_Wg_'+str(layer_idx))(xW)

        xW_f = Lambda(lambda x: K.tanh(x), name=stack_tag+'_tanh_'+str(layer_idx))(xW_f)
        xW_g = Lambda(lambda x: K.sigmoid(x), name=stack_tag+'_sigmoid_'+str(layer_idx))(xW_g)

        res = merge([xW_f, xW_g], mode='mul', name=stack_tag+'_merge_gate_'+str(layer_idx))
        #print(type(res), K.int_shape(res), hasattr(res, '_keras_history'))
        return res
项目:pixelcnn_keras    作者:suga93    | 项目源码 | 文件源码
def _shift_down(x):
        # Shift the feature maps down by one pixel: pad one zero row at the
        # top, then drop the bottom row so the original height is kept.
        height = K.int_shape(x)[1]
        padded = ZeroPadding2D(padding=(1,0,0,0))(x)
        return Lambda(lambda t: t[:, :height, :, :])(padded)
项目:pixelcnn_keras    作者:suga93    | 项目源码 | 文件源码
def _feed_v_map(self, x, layer_idx):
        '''Shift the vertical-stack maps down one pixel, then mix channels with a 1x1 conv.'''
        shifted = Lambda(self._shift_down, name='v_shift_down' + str(layer_idx))(x)
        return Convolution2D(2 * self.nb_filters, 1, 1, border_mode='valid',
                             name='v_1x1_conv_' + str(layer_idx))(shifted)
项目:LearnGraphDiscovery    作者:eugenium    | 项目源码 | 文件源码
def constructNet(input_dim=784,n_hidden=1000,n_out=1000,nb_filter=50,prob=0.5,lr=0.0001):
    """Build and compile a dilated-convolution network with diagonal branches.

    Each block runs two parallel dilated convolutions; the second branch is
    reduced to its diagonal (``GetDiag``) before being summed back in,
    followed by batch normalization and ReLU.

    Args:
        input_dim: iterable image shape passed to ``Input`` (NOTE(review):
            ``list(input_dim)`` requires an iterable — the int default 784
            would raise here; confirm callers always pass a shape tuple).
        nb_filter: number of filters per dilated convolution.
        n_hidden, n_out, prob, lr: currently unused (the string 'adam'
            optimizer ignores ``lr``) — kept for interface compatibility.

    Returns:
        A compiled Keras ``Model`` (adam, binary cross-entropy).
    """
    # Honor the caller's filter count; previously a local nb_filters=50
    # silently shadowed the nb_filter parameter.
    nb_filters = nb_filter

    def _diag_block(tensor, rate):
        # Dilated conv + diagonal-filtered parallel branch, summed, BN, ReLU.
        a1 = AtrousConvolution2D(nb_filters, 3, 3, atrous_rate=rate, border_mode='same')(tensor)
        b = AtrousConvolution2D(nb_filters, 3, 3, atrous_rate=rate, border_mode='same')(tensor)
        a2 = Lambda(GetDiag, output_shape=out_diag_shape)(b)
        comb = BatchNormalization()(merge([a1, a2], mode='sum'))
        return Activation('relu')(comb)

    input_img = Input(shape=list(input_dim))

    a = _diag_block(input_img, (1, 1))

    l = 5
    for i in range(1, l):
        # NOTE(review): the dilation rate is the constant (l, l) for every
        # block and the loop index i is unused — possibly (i, i) was
        # intended. Kept as-is to preserve behavior.
        a = _diag_block(a, (l, l))

    decoded = Convolution2D(1, 1, 1, activation='sigmoid', border_mode='same')(a)
    final = Flatten()(decoded)
    model = Model(input_img, final)
    model.summary()
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
项目:keras-mxnet-benchmarks    作者:sandeep-krishnamurthy    | 项目源码 | 文件源码
def prepare_tensorflow_multi_gpu_model(model, kwargs):
    """Replicate `model` across GPU_NUM GPUs by slicing the input batch.

    Each GPU tower runs the wrapped model on its share of the batch; the
    tower outputs are concatenated back together on the CPU along the
    batch axis, so the returned model behaves like the original.

    Args:
        model: the Keras model to parallelize.
        kwargs: dict of arguments forwarded verbatim to ``model.compile``.

    Returns:
        A compiled data-parallel Keras ``Model``.
    """
    multi_input = isinstance(model.input_shape, list)
    multi_output = len(model.outputs) > 1
    # Fresh placeholder inputs matching the wrapped model (batch dim dropped).
    x = [Input(shape[1:]) for shape in model.input_shape] if multi_input else Input(model.input_shape[1:])
    towers = []
    # One accumulator list per model output (used in the multi-output case).
    outputs = [[] for _ in range(len(model.outputs))]
    for g in range(GPU_NUM):
        with tf.device('/gpu:' + str(g)):
            # Each tower only sees its slice of the batch.
            slice_g = [Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus': GPU_NUM, 'part': g})(y) for y in x] \
                      if multi_input \
                      else Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus': GPU_NUM, 'part': g})(x)
            output_model = model(slice_g)
            if multi_output:
                for num in range(len(output_model)):
                    outputs[num].append(output_model[num])
            else:
                towers.append(output_model)

    with tf.device('/cpu:0'):
        # Reassemble the full batch on the CPU.
        if multi_output:
            merged = [merge(output, mode='concat', concat_axis=0) for output in outputs]
        else:
            merged = merge(towers, mode='concat', concat_axis=0)

    model = Model(input=x if isinstance(x, list) else [x], output=merged)
    model.compile(**kwargs)
    return model
项目:keras-steering-angle-visualizations    作者:jacobgil    | 项目源码 | 文件源码
def steering_net():
    """Return the PilotNet-style steering model: 5 conv layers, 4 dense
    layers, a single output unit, and a final atan activation layer."""
    layers = [
        Convolution2D(24, 5, 5, init=normal_init, subsample=(2, 2), name='conv1_1', input_shape=(66, 200, 3)),
        Activation('relu'),
        Convolution2D(36, 5, 5, init=normal_init, subsample=(2, 2), name='conv2_1'),
        Activation('relu'),
        Convolution2D(48, 5, 5, init=normal_init, subsample=(2, 2), name='conv3_1'),
        Activation('relu'),
        Convolution2D(64, 3, 3, init=normal_init, subsample=(1, 1), name='conv4_1'),
        Activation('relu'),
        Convolution2D(64, 3, 3, init=normal_init, subsample=(1, 1), name='conv4_2'),
        Activation('relu'),
        Flatten(),
        Dense(1164, init=normal_init, name="dense_0"),
        Activation('relu'),
        Dense(100, init=normal_init, name="dense_1"),
        Activation('relu'),
        Dense(50, init=normal_init, name="dense_2"),
        Activation('relu'),
        Dense(10, init=normal_init, name="dense_3"),
        Activation('relu'),
        Dense(1, init=normal_init, name="dense_4"),
        Lambda(atan_layer, output_shape=atan_layer_shape, name="atan_0"),
    ]
    model = Sequential(layers)
    return model
项目:taxi    作者:xuguanggen    | 项目源码 | 文件源码
def build_lstm(n_con,n_emb,vocabs_size,n_dis,emb_size,cluster_size):
    """Build a multi-branch model: continuous features, per-feature
    embeddings, and an LSTM over a masked coordinate sequence, concatenated
    and fed through a dense softmax head plus the point-calculation Lambda."""
    hidden_size = 800

    # Dense branch for the continuous features.
    con_branch = Sequential()
    con_branch.add(Dense(input_dim=n_con, output_dim=emb_size))

    # One embedding branch per discrete feature.
    emb_branches = []
    for idx in range(n_emb):
        branch = Sequential()
        branch.add(Embedding(input_dim=vocabs_size[idx], output_dim=emb_size, input_length=n_dis))
        branch.add(Flatten())
        emb_branches.append(branch)

    # LSTM branch over the (zero-masked) 2-D coordinate sequence.
    coord_dim = 2
    seq_branch = Sequential()
    seq_branch.add(BatchNormalization(input_shape=(MAX_LENGTH, coord_dim)))
    seq_branch.add(Masking([0] * coord_dim, input_shape=(MAX_LENGTH, coord_dim)))
    seq_branch.add(LSTM(emb_size, return_sequences=False, input_shape=(MAX_LENGTH, coord_dim)))

    model = Sequential()
    model.add(Merge([con_branch] + emb_branches + [seq_branch], mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
项目:Keras-ResNeXt    作者:titu1994    | 项目源码 | 文件源码
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    channels_last = K.image_data_format() == 'channels_last'

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # BUG FIX: the original expression
        #     lambda z: A if channels_last else lambda z: B
        # parsed as `lambda z: (A if channels_last else (lambda z: B))`, so in
        # channels_first mode the Lambda returned a function object instead of
        # the channel slice. The data-format branch now lives outside the
        # lambda, and `c=c` binds the loop variable eagerly (late-binding
        # closures would otherwise all capture the final value of c).
        if channels_last:
            x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels])(input)
        else:
            x = Lambda(lambda z, c=c: z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x
项目:FingerNet    作者:felixTY    | 项目源码 | 文件源码
def get_tra_ori():
    """Wrap the `orientation` op in a model mapping a 1-channel image to theta."""
    image = Input(shape=(None, None, 1))
    theta = Lambda(orientation)(image)
    return Model(inputs=[image], outputs=[theta])
项目:FingerNet    作者:felixTY    | 项目源码 | 文件源码
def get_tra_ori():
    # Model that maps a single-channel image of any spatial size to its
    # orientation field, computed by the `orientation` function via Lambda.
    inp = Input(shape=(None, None, 1))
    out = Lambda(orientation)(inp)
    model = Model(inputs=[inp], outputs=[out])
    return model
项目:keras    作者:NVIDIA    | 项目源码 | 文件源码
def test_lambda():
    """Exercise the Lambda layer: basic usage, extra arguments, and
    serialization round-trips with both lambdas and named functions."""
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    # Inline lambda, no extra arguments.
    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # Inline lambda with bound keyword arguments.
    layer_test(Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    # Serialization round-trip with a named function.
    def add_one(x):
        return x + 1

    layer = Lambda(add_one)
    layer = layer_from_config({'class_name': 'Lambda',
                               'config': layer.get_config()})

    # Serialization round-trip with a lambda function and output_shape lambda.
    layer = Lambda(lambda x: K.concatenate([K.square(x), x]),
                   output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    layer = Lambda.from_config(layer.get_config())

    # Serialization round-trip with named function and output_shape function.
    def concat_square(x):
        return K.concatenate([K.square(x), x])

    def double_last_dim(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    layer = Lambda(concat_square, output_shape=double_last_dim)
    layer = layer_from_config({'class_name': 'Lambda',
                               'config': layer.get_config()})
项目:keras_zoo    作者:david-vazquez    | 项目源码 | 文件源码
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet

    Returns a Lambda layer computing local response normalization across
    channels: X / (k + alpha * sum_over_window(X^2)) ** beta, where the sum
    runs over an n-wide window of neighboring channels. Extra **kwargs are
    forwarded to the Lambda layer (e.g. name=).
    """


    def f(X):
        # Supports both Theano ('th', channels-first) and TensorFlow
        # ('tf', channels-last) dim orderings.
        if K.image_dim_ordering()=='tf':
            b, r, c, ch = X.get_shape()
        else:
            b, ch, r, c = X.shape

        half = n // 2
        square = K.square(X)
        scale = k
        if K.image_dim_ordering() == 'th':
            # Zero-pad the channel axis by temporarily permuting it into a
            # spatial position (spatial_2d_padding only pads spatial dims),
            # then permuting it back.
            extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
            extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
            for i in range(n):
                # Accumulate the sum of squares over the n-wide channel window.
                # NOTE(review): unlike the 'tf' branch below, ch is not cast
                # with int() here -- confirm it is a plain int under Theano.
                scale += alpha * extra_channels[:, i:i+ch, :, :]
        if K.image_dim_ordering() == 'tf':
            extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 3, 1, 2)), (half, 0))
            extra_channels = K.permute_dimensions(extra_channels, (0, 2, 3, 1))
            for i in range(n):
                scale += alpha * extra_channels[:, :, :, i:i+int(ch)]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
项目:DQN    作者:jjakimoto    | 项目源码 | 文件源码
def build_network(self, conf, model=None, input_shape=None, is_conv=True):
        """Build network

        Appends the layers described by `conf` onto `model` (or onto a fresh
        identity stem of shape `input_shape` when model is None) and returns
        the resulting Sequential model.

        Each entry of `conf` is a dict with keys:
            'type': 'full' (dense) or 'conv' (Kx1 convolution)
            'n_feature': units / filters
            'kw': conv kernel rows (conv only)
            'is_drop', 'drop_rate': optional dropout before the layer
            'is_batch': batch normalization after the layer
            'activation': a Keras activation layer instance
        """
        _model = model
        model = Sequential()
        if _model is None:
            # Identity Lambda only serves to pin the stem's input shape.
            model.add(Lambda(lambda x: x, input_shape=input_shape))
        else:
            model.add(_model)

        for x in conf:
            if x['is_drop']:
                model.add(Dropout(x['drop_rate']))
            # BUG FIX: string comparisons used `is` (object identity), which
            # only worked by accident of CPython string interning; use `==`.
            if x['type'] == 'full':
                if is_conv:
                    # Flatten once when transitioning from conv to dense layers.
                    model.add(Flatten())
                    is_conv = False
                model.add(Dense(x['n_feature']))
            elif x['type'] == 'conv':
                model.add(Convolution2D(nb_filter=x['n_feature'],
                                        nb_row=x['kw'],
                                        nb_col=1,
                                        border_mode='same'))
                is_conv = True
            if x['is_batch']:
                if x['type'] == 'full':
                    model.add(BatchNormalization(mode=1, axis=-1))
                if x['type'] == 'conv':
                    model.add(BatchNormalization(mode=2, axis=-1))
            model.add(x['activation'])
        return model
项目:keras-grad-cam    作者:jacobgil    | 项目源码 | 文件源码
def grad_cam(input_model, image, category_index, layer_name):
    """Compute a Grad-CAM visualization for one class of `input_model`.

    Args:
        input_model: trained Keras classifier (1000-class, e.g. ImageNet).
        image: preprocessed input batch of shape (1, H, W, C).
        category_index: class index to explain.
        layer_name: name of the conv layer whose activations are weighted.

    Returns:
        (cam, heatmap): the uint8 BGR CAM overlay and the [0, 1] heatmap.
    """
    model = Sequential()
    model.add(input_model)

    nb_classes = 1000
    # Keep only the target class's score as the loss.
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer,
                     output_shape = target_category_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    # BUG FIX: the layer lookup compared names with `is` (object identity),
    # which fails for equal-but-distinct runtime strings; use `==`.
    conv_output = [l for l in model.layers[0].layers if l.name == layer_name][0].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input], [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # Global-average-pool the gradients to get one weight per channel.
    weights = np.mean(grads_val, axis = (0, 1))
    cam = np.ones(output.shape[0 : 2], dtype = np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = cv2.resize(cam, (224, 224))
    cam = np.maximum(cam, 0)  # ReLU: keep only positive evidence
    heatmap = cam / np.max(cam)

    #Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap
项目:Siamese    作者:ascourge21    | 项目源码 | 文件源码
def train_model(x_tr, y_tr, conv_f_n):
    """Train a Siamese shape-matching model (epi) and save it to disk."""
    save_name = 'shape_match_model_epi.h5'
    tr_epoch = 15

    # Twin inputs feeding one shared CNN encoder.
    sample_shape = x_tr.shape[2:]
    left = Input(shape=sample_shape)
    right = Input(shape=sample_shape)
    encoder = create_cnn_network(sample_shape, conv_f_n)
    encoded_left = encoder(left)
    encoded_right = encoder(right)

    # Euclidean distance between the two embeddings.
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([encoded_left, encoded_right])

    model_tr = Model(input=[left, right], output=distance)

    # Train with contrastive loss; stop early when validation loss stalls.
    model_tr.compile(loss=contrastive_loss, optimizer=RMSprop())
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.25,
                 batch_size=32, verbose=2, nb_epoch=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr


# test, also provide info on which pair it was trained on and which it was tested on
项目:Siamese    作者:ascourge21    | 项目源码 | 文件源码
def train_model(x_tr, y_tr, conv_f_n, dense_n):
    """Train the endo (k3, new) Siamese shape-matching model and save it."""
    save_name = '/home/nripesh/PycharmProjects/Siamese/siamese_supervised/shape_match_model_endo_k3_new.h5'
    tr_epoch = 10

    # Two inputs sharing one CNN branch.
    sample_shape = x_tr.shape[2:]
    in_a = Input(shape=sample_shape)
    in_b = Input(shape=sample_shape)
    shared_net = create_cnn_network(sample_shape, conv_f_n, dense_n)
    feat_a = shared_net(in_a)
    feat_b = shared_net(in_b)

    # Distance head on top of the paired embeddings.
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([feat_a, feat_b])

    model_tr = Model(inputs=[in_a, in_b], outputs=distance)

    # Contrastive loss + RMSprop, with early stopping on validation loss.
    model_tr.compile(loss=contrastive_loss, optimizer=RMSprop())
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.30,
                 batch_size=128, verbose=2, epochs=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr


# test, also provide info on which pair it was trained on and which it was tested on
项目:Siamese    作者:ascourge21    | 项目源码 | 文件源码
def train_model(x_tr, y_tr, conv_f_n):
    """Train a Siamese shape-matching model (endo) and save it to disk."""
    save_name = 'shape_match_model_endo.h5'
    tr_epoch = 15

    # Paired inputs through a single shared CNN encoder.
    sample_shape = x_tr.shape[2:]
    anchor = Input(shape=sample_shape)
    other = Input(shape=sample_shape)
    encoder = create_cnn_network(sample_shape, conv_f_n)
    emb_anchor = encoder(anchor)
    emb_other = encoder(other)

    # Euclidean distance between embeddings is the model output.
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([emb_anchor, emb_other])

    model_tr = Model(input=[anchor, other], output=distance)

    # Contrastive loss with early stopping on validation loss.
    model_tr.compile(loss=contrastive_loss, optimizer=RMSprop())
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.25,
                 batch_size=32, verbose=2, nb_epoch=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr


# test, also provide info on which pair it was trained on and which it was tested on
项目:Siamese    作者:ascourge21    | 项目源码 | 文件源码
def train_model(x_tr, y_tr, conv_f_n, dense_n):
    """Train the epi (sx4) Siamese model, plot its loss curves, and save it."""
    save_name = '/home/nripesh/PycharmProjects/Siamese/siamese_supervised/shape_match_model_epi_sx4.h5'
    tr_epoch = 20

    # Shared CNN applied to both inputs of the pair.
    sample_shape = x_tr.shape[2:]
    in_a = Input(shape=sample_shape)
    in_b = Input(shape=sample_shape)
    shared_net = create_cnn_network(sample_shape, conv_f_n, dense_n)
    feat_a = shared_net(in_a)
    feat_b = shared_net(in_b)

    # Euclidean distance head.
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([feat_a, feat_b])

    model_tr = Model(inputs=[in_a, in_b], outputs=distance)

    # Contrastive loss, RMSprop with a raised learning rate, early stopping.
    model_tr.compile(loss=contrastive_loss, optimizer=RMSprop(lr=.003))
    history = model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.30,
                           batch_size=128, verbose=2, epochs=tr_epoch,
                           callbacks=[EarlyStopping(monitor='val_loss', patience=2)])

    # Plot train/validation loss history to a PNG for later inspection.
    plt.figure(1)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.savefig('/home/nripesh/PycharmProjects/Siamese/siamese_supervised/epi_train_val_loss.png')
    plt.close(1)
    model_tr.save(save_name)
    return model_tr


# test, also provide info on which pair it was trained on and which it was tested on
项目:Siamese    作者:ascourge21    | 项目源码 | 文件源码
def train_model(x_tr, y_tr, conv_f_n, dense_n):
    """Train a Siamese shape-matching model (no-maxpool CNN block) and save it."""
    save_name = 'shape_match_model1.h5'
    tr_epoch = 10

    # Twin inputs through one shared encoder (no max-pooling variant).
    sample_shape = x_tr.shape[2:]
    in_a = Input(shape=sample_shape)
    in_b = Input(shape=sample_shape)
    encoder = cnn_block_without_maxpool(sample_shape, conv_f_n, dense_n)
    emb_a = encoder(in_a)
    emb_b = encoder(in_b)

    # Euclidean distance between the two embeddings.
    distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([emb_a, emb_b])

    model_tr = Model(input=[in_a, in_b], output=distance)

    # Contrastive loss with early stopping on validation loss.
    model_tr.compile(loss=contrastive_loss, optimizer=RMSprop())
    model_tr.fit([x_tr[:, 0], x_tr[:, 1]], y_tr, validation_split=.30,
                 batch_size=128, verbose=2, nb_epoch=tr_epoch,
                 callbacks=[EarlyStopping(monitor='val_loss', patience=2)])
    model_tr.save(save_name)
    return model_tr


# test, also provide info on which pair it was trained on and which it was tested on