Python keras.layers module: Permute() usage examples

The following 12 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.Permute().
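
Before the project samples, here is a minimal standalone sketch (not from any of the projects below) of what Permute does: it reorders the non-batch axes of its input according to a 1-indexed pattern.

from keras.layers import Input, Permute
from keras.models import Model

inp = Input(shape=(10, 64))
out = Permute((2, 1))(inp)    # dims are 1-indexed; the batch axis always stays first
model = Model(inp, out)
print(model.output_shape)     # (None, 64, 10)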

Project: keras-mtcnn | Author: xiangrufan
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape=[48, 48, 3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)

    classifier = Dense(2, activation='softmax', name='conv6-1')(x)
    bbox_regress = Dense(4, name='conv6-2')(x)
    landmark_regress = Dense(10, name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)

    return model
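
The Permute((3, 2, 1)) before Flatten reorders the 3x3x128 conv4 feature map to channel-major order ahead of the Dense layer, presumably so the flattened weight ordering matches the original Caffe/MATLAB MTCNN weights. A hypothetical numpy walk-through of the shapes (not from the repo):

import numpy as np

feat = np.zeros((1, 3, 3, 128))            # conv4/prelu4 output for a 48x48x3 input
perm = np.transpose(feat, (0, 3, 2, 1))    # Permute((3, 2, 1)) -> (1, 128, 3, 3)
flat = perm.reshape(1, -1)                 # Flatten -> (1, 1152), channel-major order
print(perm.shape, flat.shape)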
Project: batchA3C | Author: ssamot
def build_network(num_actions, agent_history_length, resized_width, resized_height):
    state = tf.placeholder("float", [None, agent_history_length, resized_width, resized_height])

    inputs_v = Input(shape=(agent_history_length, resized_width, resized_height,))
    #model_v  = Permute((2, 3, 1))(inputs_v)

    model_v = Convolution2D(nb_filter=16, nb_row=8, nb_col=8, subsample=(4,4), activation='relu', border_mode='same')(inputs_v)
    model_v = Convolution2D(nb_filter=32, nb_row=4, nb_col=4, subsample=(2,2), activation='relu', border_mode='same')(model_v)
    model_v = Flatten()(model_v)
    model_v = Dense(output_dim=512)(model_v)
    model_v = PReLU()(model_v)

    action_probs = Dense(name="p", output_dim=num_actions, activation='softmax')(model_v)
    state_value = Dense(name="v", output_dim=1, activation='linear')(model_v)

    value_network = Model(input=inputs_v, output=[state_value, action_probs])

    return state, value_network
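
The snippet above uses the Keras 1.x API (nb_filter, subsample, border_mode, output_dim). A hedged Keras 2 equivalent of the two convolution layers, changing only the argument names:

from keras.layers import Conv2D

model_v = Conv2D(filters=16, kernel_size=(8, 8), strides=(4, 4),
                 activation='relu', padding='same')(inputs_v)
model_v = Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2),
                 activation='relu', padding='same')(model_v)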
Project: keras-mtcnn | Author: xiangrufan
def create_Kao_Rnet(weight_path='model24.h5'):
    input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to enable arbitrary shape input
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)

    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Project: unblackboxing_webinar | Author: deepsense-ai
def arch_attention(embedding_layer, sequence_length, classes):    
    tweet_input = Input(shape=(sequence_length,), dtype='int32')        
    embedded_tweet = embedding_layer(tweet_input)

    activations = LSTM(128, return_sequences=True, name='recurrent_layer')(embedded_tweet)

    attention = TimeDistributed(Dense(1, activation='tanh'))(activations) 
    attention = Flatten()(attention)
    attention = Activation('softmax')(attention)
    attention = RepeatVector(128)(attention)
    attention = Permute([2, 1], name='attention_layer')(attention)

    sent_representation = merge([activations, attention], mode='mul')
    sent_representation = Lambda(lambda xin: K.sum(xin, axis=1), name='merged_layer')(sent_representation)

    tweet_output = Dense(classes, activation='softmax', name='predictions')(sent_representation)      

    tweetnet = Model(tweet_input, tweet_output)
    tweetnet.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])    
    return tweetnet
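
The RepeatVector/Permute pair is what broadcasts the per-timestep attention weights back over the 128 LSTM features. A minimal numpy sketch of the shape flow (batch of 1; T is a hypothetical sequence length):

import numpy as np

T, H = 40, 128
activations = np.random.rand(1, T, H)                      # LSTM output (batch, T, H)
scores = np.random.rand(1, T)                              # TimeDistributed Dense(1) + Flatten
weights = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)  # softmax over time
weights = np.repeat(weights[:, np.newaxis, :], H, axis=1)  # RepeatVector(128) -> (1, H, T)
weights = weights.transpose(0, 2, 1)                       # Permute([2, 1]) -> (1, T, H)
sent = (activations * weights).sum(axis=1)                 # mul merge + Lambda sum -> (1, H)
print(sent.shape)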
Project: unblackboxing_webinar | Author: deepsense-ai
def arch_attention36(embedding_layer, sequence_length, classes):    
    tweet_input = Input(shape=(sequence_length,), dtype='int32')        
    embedded_tweet = embedding_layer(tweet_input)

    activations = LSTM(36, return_sequences=True, name='recurrent_layer')(embedded_tweet)

    attention = TimeDistributed(Dense(1, activation='tanh'))(activations) 
    attention = Flatten()(attention)
    attention = Activation('softmax')(attention)
    attention = RepeatVector(36)(attention)
    attention = Permute([2, 1], name='attention_layer')(attention)

    sent_representation = merge([activations, attention], mode='mul')
    sent_representation = Lambda(lambda xin: K.sum(xin, axis=1), name='merged_layer')(sent_representation)

    tweet_output = Dense(classes, activation='softmax', name='output_layer')(sent_representation)      

    tweetnet = Model(tweet_input, tweet_output)
    tweetnet.compile(optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])    
    return tweetnet
Project: Fabrik | Author: Cloud-CV
def test_keras_import(self):
        model = Sequential()
        model.add(Permute((2, 1), input_shape=(10, 64)))
        model.build()
        self.keras_type_test(model, 0, 'Permute')
Project: Fabrik | Author: Cloud-CV
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['Permute']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = permute(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Permute')
Project: Fabrik | Author: Cloud-CV
def permute(layer, layer_in, layerId):
    out = {layerId: Permute(map(int, layer['params']['dim'].split(',')))(*layer_in)}
    return out
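
A hypothetical call mirroring test_keras_export above; the layer dict here is assumed, and the dim string is comma-separated and 1-indexed. Note that on Python 3 map() returns a lazy iterator, so wrapping it in tuple() before handing it to Permute would be the defensive choice.

from keras.layers import Input

inp = Input(shape=(10, 64))
layer = {'params': {'dim': '2, 1'}}    # hypothetical Fabrik layer dict
out = permute(layer, [inp], 'l1')      # out['l1'] has shape (None, 64, 10)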
Project: ParseLawDocuments | Author: FanhuaandLuomu
def Mem_Model(story_maxlen, query_maxlen, vocab_size):
    input_encoder_m = Input(shape=(story_maxlen,), dtype='int32', name='input_encoder_m')
    x = Embedding(output_dim=64, input_dim=vocab_size, input_length=story_maxlen)(input_encoder_m)
    x = Dropout(0.5)(x)

    question_encoder = Input(shape=(query_maxlen,), dtype='int32', name='question_encoder')
    y = Embedding(output_dim=64, input_dim=vocab_size, input_length=query_maxlen)(question_encoder)
    y = Dropout(0.5)(y)

    # match between story and question: (samples, story_maxlen, query_maxlen)
    z = merge([x, y], mode='dot', dot_axes=[2, 2])
    # z = merge([x, y], mode='sum')
    match = Activation('softmax')(z)

    input_encoder_c = Input(shape=(story_maxlen,), dtype='int32', name='input_encoder_c')
    c = Embedding(output_dim=query_maxlen, input_dim=vocab_size, input_length=story_maxlen)(input_encoder_c)
    c = Dropout(0.5)(c)

    response = merge([match, c], mode='sum')
    w = Permute((2, 1))(response)  # (samples, query_maxlen, story_maxlen)

    answer = merge([w, y], mode='concat', concat_axis=-1)
    lstm = LSTM(32)(answer)
    lstm = Dropout(0.5)(lstm)
    main_loss = Dense(50, activation='sigmoid', name='main_output')(lstm)

    model = Model(input=[input_encoder_m, question_encoder, input_encoder_c], output=main_loss)
    return model
Project: coremltools | Author: apple
def test_tiny_permute(self):
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(4, 3, 2)))

        # When the input blob is a 3D array (D1, D2, D3), Keras assumes the axes mean
        # (D1=H, D2=W, D3=C), while CoreML assumes (D1=C, D2=H, D3=W). However, it is
        # unclear what the axes mean for the output blob after permutation. Since a
        # permutation on (H, W, C) blobs is usually followed by recurrent or Dense
        # layers, we choose to make CoreML's output axis order match Keras's after
        # the permutation.
        self._test_keras_model(model, transpose_keras_result=False)
Project: ParseLawDocuments | Author: FanhuaandLuomu
def Mem_Model2(story_maxlen,query_maxlen,vocab_size):
    input_encoder_m = Sequential()
    input_encoder_m.add(Embedding(input_dim=vocab_size,
                                  output_dim=128,
                                  input_length=story_maxlen))
    input_encoder_m.add(Dropout(0.5))
    # output: (samples, story_maxlen, embedding_dim)
    # embed the question into a sequence of vectors
    question_encoder = Sequential()
    question_encoder.add(Embedding(input_dim=vocab_size,
                                   output_dim=128,
                                   input_length=query_maxlen))
    question_encoder.add(Dropout(0.5))
    # output: (samples, query_maxlen, embedding_dim)
    # compute a 'match' between input sequence elements (which are vectors)
    # and the question vector sequence
    match = Sequential()
    match.add(Merge([input_encoder_m, question_encoder],
                    mode='dot',
                    dot_axes=[2, 2]))
    match.add(Activation('softmax'))

    plot(match, to_file='model_1.png')

    # output: (samples, story_maxlen, query_maxlen)
    # embed the input into a single vector with size = story_maxlen:
    input_encoder_c = Sequential()
    # input_encoder_c.add(Embedding(input_dim=vocab_size,
    #                               output_dim=query_maxlen,
    #                               input_length=story_maxlen))
    input_encoder_c.add(Embedding(input_dim=vocab_size,
                                  output_dim=query_maxlen,
                                  input_length=story_maxlen))
    input_encoder_c.add(Dropout(0.5))
    # output: (samples, story_maxlen, query_maxlen)
    # sum the match vector with the input vector:
    response = Sequential()
    response.add(Merge([match, input_encoder_c], mode='sum'))
    # output: (samples, story_maxlen, query_maxlen)
    response.add(Permute((2, 1)))  # output: (samples, query_maxlen, story_maxlen)

    plot(response, to_file='model_2.png')

    # concatenate the match vector with the question vector,
    # and do logistic regression on top
    answer = Sequential()
    answer.add(Merge([response, question_encoder], mode='concat', concat_axis=-1))
    # the original paper uses a matrix multiplication for this reduction step.
    # we choose to use a RNN instead.
    answer.add(LSTM(64))
    # one regularization layer -- more would probably be needed.
    answer.add(Dropout(0.5))
    answer.add(Dense(50))
    # we output a probability distribution over the vocabulary
    answer.add(Activation('sigmoid'))

    return answer
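
Mem_Model2 targets the Keras 1.x Sequential/Merge API (Merge, plot, concat_axis). A hedged sketch of the match -> response -> Permute steps in the Keras 2 functional API, reusing the argument names from the function above and keeping the same shapes:

from keras.layers import Input, Embedding, Dropout, Activation, Permute, dot, add

story = Input(shape=(story_maxlen,), dtype='int32')
query = Input(shape=(query_maxlen,), dtype='int32')
m = Dropout(0.5)(Embedding(vocab_size, 128)(story))           # (samples, story_maxlen, 128)
q = Dropout(0.5)(Embedding(vocab_size, 128)(query))           # (samples, query_maxlen, 128)
match = Activation('softmax')(dot([m, q], axes=(2, 2)))       # (samples, story_maxlen, query_maxlen)
c = Dropout(0.5)(Embedding(vocab_size, query_maxlen)(story))  # (samples, story_maxlen, query_maxlen)
response = Permute((2, 1))(add([match, c]))                   # (samples, query_maxlen, story_maxlen)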

Project: CNN_LSTM | Author: FrankBlood
def basic_attention(nb_words=10000, EMBEDDING_DIM=300, \
                    MAX_SEQUENCE_LENGTH=40, \
                    num_rnn=300, num_dense=300, rate_drop_rnn=0.25, \
                    rate_drop_dense=0.25, act='relu'):
    embedding_layer = Embedding(nb_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH)
    rnn_layer = Bidirectional(GRU(num_rnn, dropout=rate_drop_rnn, recurrent_dropout=rate_drop_rnn, return_sequences=True))
    attention_W = TimeDistributed(Dense(350, activation='tanh'))
    attention_w = TimeDistributed(Dense(1))
    attention_softmax = Activation('softmax')
    attention_sum = Lambda(lambda x: K.sum(x, axis=1))

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = rnn_layer(embedded_sequences_1)

    attention1 = attention_W(x1)
    attention1 = attention_w(attention1)
    attention1 = attention_softmax(attention1)
    attention1 = Permute([2, 1])(attention1)
    x1 = Permute([2, 1])(x1)
    x1 = multiply([attention1, x1])
    x1 = Permute([2, 1])(x1)
    x1 = attention_sum(x1)

    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    x2 = rnn_layer(embedded_sequences_2)

    attention2 = attention_W(x2)
    attention2 = attention_w(attention2)
    attention2 = attention_softmax(attention2)
    attention2 = Permute([2, 1])(attention2)
    x2 = Permute([2, 1])(x2)
    x2 = multiply([attention2, x2])
    x2 = Permute([2, 1])(x2)
    x2 = attention_sum(x2)

    merged = multiply([x1, x2])
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    merged = Dense(num_dense, activation=act)(merged)
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    preds = Dense(1, activation='sigmoid')(merged)

    ########################################
    ## train the model
    ########################################
    model = Model(inputs=[sequence_1_input, sequence_2_input], outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['acc'])
    model.summary()
    # print(STAMP)
    return model
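
A hypothetical usage sketch (the data here is random and purely illustrative): the model takes two integer-encoded sequences of length MAX_SEQUENCE_LENGTH and predicts a single sigmoid score, e.g. for sentence-pair matching.

import numpy as np

model = basic_attention(nb_words=10000, MAX_SEQUENCE_LENGTH=40)
seq_a = np.random.randint(0, 10000, size=(32, 40))   # batch of token-id sequences
seq_b = np.random.randint(0, 10000, size=(32, 40))
labels = np.random.randint(0, 2, size=(32, 1))
model.fit([seq_a, seq_b], labels, epochs=1, batch_size=32)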