Python keras.layers module: concatenate() example source code

From open-source Python projects, we extracted the following 50 code examples illustrating how to use keras.layers.concatenate().

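A quick orientation before the extracted examples: concatenate() joins a list of tensors along one axis, and all other dimensions must match. A minimal self-contained sketch (layer sizes here are illustrative, not drawn from any project below):

from keras.layers import Input, Dense, concatenate
from keras.models import Model

# Two branches are joined on the last axis: (None, 4) + (None, 4) -> (None, 8)
a = Input(shape=(16,))
b = Input(shape=(8,))
merged = concatenate([Dense(4)(a), Dense(4)(b)], axis=-1)
output = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[a, b], outputs=output)

For image-like tensors the concatenation axis is usually the channel axis (1 for channels_first, 3 or -1 for channels_last), as several of the examples below show.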
Project: deeppavlov    Author: deepmipt    | Project source | File source
def cnn_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        outputs = []
        for i in range(len(self.kernel_sizes)):
            output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                              kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
            output_i = BatchNormalization()(output_i)
            output_i = Activation('relu')(output_i)
            output_i = GlobalMaxPooling1D()(output_i)
            outputs.append(output_i)

        output = concatenate(outputs, axis=1)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
Project: GlottGAN    Author: bajibabu    | Project source | File source
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x) 
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x) # shape is 100 x 10

    # Conv Layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 250
    x = UpSampling1D(size=2)(x) # output shape is 200 x 250

    # Conv Layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 200 x 100
    x = UpSampling1D(size=2)(x) # output shape is 400 x 100

    # Conv Layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x) # final output shape is 400 x 1

    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)

    return generator_model
Project: Cat-Segmentation    Author: ardamavi    | Project source | File source
def get_model():

    inputs = Input(shape=(64, 64, 3))

    conv_1 = Conv2D(1, (3, 3), strides=(1, 1), padding='same')(inputs)
    act_1 = Activation('relu')(conv_1)

    conv_2 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(act_1)
    act_2 = Activation('relu')(conv_2)

    deconv_1 = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same')(act_2)
    act_3 = Activation('relu')(deconv_1)

    merge_1 = concatenate([act_3, act_1], axis=3)

    deconv_2 = Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same')(merge_1)
    act_4 = Activation('relu')(deconv_2)

    model = Model(inputs=[inputs], outputs=[act_4])

    model.compile(optimizer='adadelta', loss=dice_coef_loss, metrics=[dice_coef])

    return model
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | Project source | File source
def create_actor_network(self, state_size,action_dim):
        print("Now we build the model")

        # Batch norm version
        S = Input(shape=[state_size])
        s1 = BatchNormalization()(S)
        s1 = Dense(HIDDEN1_UNITS)(s1)
        s1 = BatchNormalization()(s1)
        s1 = Activation('relu')(s1)
        s1 = Dense(HIDDEN2_UNITS)(s1)
        s1 = BatchNormalization()(s1)
        h1 = Activation('relu')(s1)

        Steering = Dense(1,activation='tanh')(h1)  
        Acceleration = Dense(1,activation='sigmoid')(h1)   
        Brake = Dense(1,activation='sigmoid')(h1)
        # V = merge([Steering,Acceleration,Brake],mode='concat')
        V = layers.concatenate([Steering,Acceleration,Brake])          
        model = Model(inputs=S,outputs=V)
        return model, model.trainable_weights, S
Project: GlottGAN    Author: bajibabu    | Project source | File source
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")

    # Conv Layer 1
    x = Conv1D(filters=100, kernel_size=13, padding='same')(disc_input)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 400
    x = AveragePooling1D(pool_size=20)(x) # output shape is 100 x 20

    # Conv Layer 2
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x) # output shape is 250 x 20
    x = AveragePooling1D(pool_size=5)(x) # output shape is 250 x 4

    # Conv Layer 3
    x = Conv1D(filters=300, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x) # output shape is 300 x 4
    x = Flatten()(x) # output shape is 1200

    x = concatenate([x, aux_input], axis=-1) # shape is 1247

    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x) # output shape is 200

    # Dense Layer 2
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    discriminator_model = Model(
        outputs=[x], inputs=[disc_input, aux_input], name=model_name)

    return discriminator_model
Project: GlottGAN    Author: bajibabu    | Project source | File source
def load_data(data_dir, num_files=30):
    files_list = os.listdir(data_dir)
    data = None
    ac_data = None
    for fname in files_list[:num_files]:
        print(fname)
        f = os.path.join(data_dir, fname)
        with netcdf.netcdf_file(f, 'r') as fid:
            m = fid.variables['outputMeans'][:].copy()
            s = fid.variables['outputStdevs'][:].copy()
            feats = fid.variables['targetPatterns'][:].copy()
            ac_feats = fid.variables['inputs'][:].copy()
            scaler = preprocessing.StandardScaler()
            scaler.mean_ = m
            scaler.scale_ = s
            feats = scaler.inverse_transform(feats)
            assert feats.shape[0] == ac_feats.shape[0]
            # feats = np.concatenate((feats,ac_feats),axis=1)
        if data is None and ac_data is None:
            data = feats
            ac_data = ac_feats
        else:
            data = np.vstack((data, feats))
            ac_data = np.vstack((ac_data, ac_feats))
    return data, ac_data
Project: keras-squeezenet    Author: rcmalli    | Project source | File source
def fire_module(x, fire_id, squeeze=16, expand=64):
    s_id = 'fire' + str(fire_id) + '/'

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x


# Original SqueezeNet from paper.
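The extraction cuts off before the full network is assembled. As a hedged sketch of how fire_module is typically chained (input size and fire ids illustrative; assumes Input, Convolution2D and MaxPooling2D are imported and the sq1x1/relu/exp1x1/exp3x3 name constants from keras-squeezenet are in scope):

img_input = Input(shape=(227, 227, 3))
x = Convolution2D(64, (3, 3), strides=(2, 2), activation='relu', name='conv1')(img_input)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
x = fire_module(x, fire_id=2, squeeze=16, expand=64)  # each fire module ends in a channel-axis concatenate
x = fire_module(x, fire_id=3, squeeze=16, expand=64)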
Project: Hotpot    Author: Liang-Qiu    | Project source | File source
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
Project: Hotpot    Author: Liang-Qiu    | Project source | File source
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
Project: Keras-GAN    Author: eriklindernoren    | Project source | File source
def build_discriminator(self):

        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        d_in = concatenate([z, Flatten()(img)])

        model = Dense(1024)(d_in)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        validity = Dense(1, activation="sigmoid")(model)

        return Model([z, img], validity)
Project: coremltools    Author: apple    | Project source | File source
def test_tiny_concat_random(self):
        np.random.seed(1988)
        input_dim = 10
        num_channels = 6

        # Define a model
        input_tensor = Input(shape = (input_dim, ))
        x1 = Dense(num_channels)(input_tensor)
        x2 = Dense(num_channels)(x1)
        x3 = Dense(num_channels)(x1)
        x4 = concatenate([x2, x3])
        x5 = Dense(num_channels)(x4)

        model = Model(inputs=[input_tensor], outputs=[x5])

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model)
Project: coremltools    Author: apple    | Project source | File source
def test_tiny_concat_seq_random(self):
        np.random.seed(1988)
        max_features = 10
        embedding_dims = 4
        seq_len = 5
        num_channels = 6

        # Define a model
        input_tensor = Input(shape = (seq_len, ))
        x1 = Embedding(max_features, embedding_dims)(input_tensor)
        x2 = Embedding(max_features, embedding_dims)(input_tensor)
        x3 = concatenate([x1, x2], axis=1)

        model = Model(inputs=[input_tensor], outputs=[x3])

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model, one_dim_seq_flags=[True])
Project: coremltools    Author: apple    | Project source | File source
def test_shared_vision(self):
        digit_input = Input(shape=(27, 27,1))
        x = Conv2D(64, (3, 3))(digit_input)
        x = Conv2D(64, (3, 3))(x)
        out = Flatten()(x)

        vision_model = Model(inputs=[digit_input], outputs=[out])

        # then define the tell-digits-apart model
        digit_a = Input(shape=(27,27,1))
        digit_b = Input(shape=(27,27,1))

        # the vision model will be shared, weights and all
        out_a = vision_model(digit_a)
        out_b = vision_model(digit_b)

        concatenated = concatenate([out_a, out_b])
        out = Dense(1, activation='sigmoid')(concatenated)
        model = Model(inputs=[digit_a, digit_b], outputs=out)
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_keras_model(model)
Project: coremltools    Author: apple    | Project source | File source
def test_dense_elementwise_params(self):
        options = dict(
            modes = [add, multiply, concatenate, average, maximum]
        )
        def build_model(mode):
            x1 = Input(shape=(3,))
            x2 = Input(shape=(3,))
            y1 = Dense(4)(x1)
            y2 = Dense(4)(x2)
            z = mode([y1, y2])
            model = Model([x1,x2], z)
            return mode, model

        product = itertools.product(*options.values())
        args = [build_model(p[0]) for p in product]
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param)
Project: coremltools    Author: apple    | Project source | File source
def test_tiny_image_captioning_feature_merge(self):

        img_input_1 = Input(shape=(16,16,3))
        x = Conv2D(2,(3,3))(img_input_1)
        x = Flatten()(x)
        img_model = Model([img_input_1], [x])

        img_input = Input(shape=(16,16,3))
        x = img_model(img_input)
        x = Dense(8, name = 'cap_dense')(x)
        x = Reshape((1,8), name = 'cap_reshape')(x)

        sentence_input = Input(shape=(5,)) # max_length = 5
        y = Embedding(8, 8, name = 'cap_embedding')(sentence_input)
        z = concatenate([x,y], axis = 1, name = 'cap_merge')

        combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
        self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
Project: coremltools    Author: apple    | Project source | File source
def test_tiny_image_captioning(self):
        # use a conv layer as a image feature branch
        img_input_1 = Input(shape=(16,16,3))
        x = Conv2D(2,(3,3))(img_input_1)
        x = Flatten()(x)
        img_model = Model(inputs=[img_input_1], outputs=[x])

        img_input = Input(shape=(16,16,3))
        x = img_model(img_input)
        x = Dense(8, name = 'cap_dense')(x)
        x = Reshape((1,8), name = 'cap_reshape')(x)

        sentence_input = Input(shape=(5,)) # max_length = 5
        y = Embedding(8, 8, name = 'cap_embedding')(sentence_input)
        z = concatenate([x,y], axis = 1, name = 'cap_merge')
        z = LSTM(4, return_sequences = True, name = 'cap_lstm')(z)
        z = TimeDistributed(Dense(8), name = 'cap_timedistributed')(z)

        combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
        self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
Project: mtl    Author: zhenhongChen    | Project source | File source
def global_handle(self, emb_layer, flag):

        fw_lstm_out = self.forward_lstm(emb_layer)
        bw_lstm_out = self.backward_lstm(emb_layer)
        conv_out = self.conv_dropout(self.conv(emb_layer))

        fw_lstm_out = TimeDistributed(Dense(self.params['attention_dim']), name='fw_tb_'+flag)(fw_lstm_out)
        fw_lstm_att = Attention()(fw_lstm_out)
        # fw_lstm_att = Reshape((self.params['lstm_output_dim'], 1))(fw_lstm_att)

        conv_out = TimeDistributed(Dense(self.params['attention_dim']), name='conv_tb_'+flag)(conv_out)
        conv_att = Attention()(conv_out)
        # conv_att = Reshape((self.params['filters'], 1))(conv_att)

        bw_lstm_out = TimeDistributed(Dense(self.params['attention_dim']), name='bw_tb_'+flag)(bw_lstm_out)
        bw_lstm_att = Attention()(bw_lstm_out)
        # bw_lstm_att = Reshape((self.params['lstm_output_dim'], 1))(bw_lstm_att)

        return concatenate([fw_lstm_att, conv_att, bw_lstm_att], axis=2)
Project: extkeras    Author: andhus    | Project source | File source
def attention_step(
        self,
        attended,
        attention_states,
        step_input,
        recurrent_states
    ):
        [attention_tm1, kappa_tm1] = attention_states
        params = self.params_layer(
            concatenate([step_input, recurrent_states[0]])
        )
        attention, kappa = self._get_attention_and_kappa(
            attended,
            params,
            kappa_tm1
        )
        return attention, [attention, kappa]
Project: recurrentshop    Author: farizrahman4u    | Project source | File source
def QRNcell():
    xq = Input(batch_shape=(batch_size, embedding_dim * 2))
    # Split into context and query
    xt = Lambda(lambda x, dim: x[:, :dim], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] // 2))(xq)
    qt = Lambda(lambda x, dim: x[:, dim:], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] // 2))(xq)

    h_tm1 = Input(batch_shape=(batch_size, embedding_dim))

    zt = Dense(1, activation='sigmoid', bias_initializer=Constant(2.5))(multiply([xt, qt]))
    zt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(zt)
    ch = Dense(embedding_dim, activation='tanh')(concatenate([xt, qt], axis=-1))
    rt = Dense(1, activation='sigmoid')(multiply([xt, qt]))
    rt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(rt)
    ht = add([multiply([zt, ch, rt]), multiply([Lambda(lambda x: 1 - x, output_shape=lambda s: s)(zt), h_tm1])])
    return RecurrentModel(input=xq, output=ht, initial_states=[h_tm1], final_states=[ht], return_sequences=True)


#
# Load data
#
Project: recurrentshop    Author: farizrahman4u    | Project source | File source
def generate_data(num_samples, max_len):
    values = np.random.normal(size=[num_samples, max_len, 1])
    mask = np.zeros([num_samples, max_len, 1])
    answers = np.zeros([num_samples, 1])

    for i in range(num_samples):
        j1, j2 = 0, 0
        while j1 == j2:
            j1 = np.random.randint(max_len)
            j2 = np.random.randint(max_len)
        mask[i, (j1, j2)] = 1.0
        answers[i] = np.sum(values[i]*mask[i])
    data = np.concatenate((values, mask), 2)
    return data, answers


#####################################################################
# RWA layer
#####################################################################
Project: yolov2    Author: datlife    | Project source | File source
def yolov2_detector(feature_map,
                    fine_grained_layers):
    """
        Original YOLOv2 Implementation

    :param feature_map:
    :param fine_grained_layers:
    :return:
    """
    layer = fine_grained_layers[0]

    x = conv_block(feature_map, 1024, (3, 3))
    x = conv_block(x, 1024, (3, 3))
    x2 = x

    connected_layer = conv_block(layer, 64, (1, 1))
    rerouted_layer  = Reroute(block_size=2,
                              name='space_to_depth_x2')(connected_layer)

    x = concatenate([rerouted_layer, x2])
    x = conv_block(x, 1024, (3, 3))

    return x
Project: yolov2    Author: datlife    | Project source | File source
def mobilenet_detector(feature_map,
                       fine_grained_layers):
    """
    MobileNet Detector Implementation
    :param feature_map:
    :param fine_grained_layers:

    :return:
    """
    x = _depthwise_conv_block(feature_map, 1024, 1.0, block_id=14)
    x = _depthwise_conv_block(x, 1024, 1.0, block_id=15)

    # Reroute
    concat_layers = [x]
    for layer in fine_grained_layers:
        connected_layer = _depthwise_conv_block(layer, 64, 1.0)  # alpha=1.0, matching the (input, filters, alpha) calls above
        rerouted_layer = Reroute(block_size=2, name='space_to_depth_x2')(connected_layer)
        concat_layers.append(rerouted_layer)

    x = concatenate(concat_layers)
    x = _depthwise_conv_block(x, 1024, 1.0, block_id=16)  # alpha=1.0 and the next block_id, matching the calls above

    return x
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | Project source | File source
def create_actor_network(self, state_size,action_dim):
        print("Now we build the model")
        S = Input(shape=[state_size])   
        h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
        h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
        # Steering = Dense(1,activation='tanh',init=lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)  
        # Acceleration = Dense(1,activation='sigmoid',lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)   
        # Brake = Dense(1,activation='sigmoid',lambda shape:VarianceScaling(scale=1e-4)(shape))(h1)
        Steering = Dense(1,activation='tanh')(h1)  
        Acceleration = Dense(1,activation='sigmoid')(h1)   
        Brake = Dense(1,activation='sigmoid')(h1)
        # V = merge([Steering,Acceleration,Brake],mode='concat')
        V = layers.concatenate([Steering,Acceleration,Brake])          
        model = Model(inputs=S,outputs=V)
        return model, model.trainable_weights, S
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | Project source | File source
def create_actor_network(self, state_size,action_dim):
        ## original version
        print("Now we build the model")
        S = Input(shape=[state_size])   
        h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)
        h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)

        Steering = Dense(1,activation='tanh')(h1)  
        Acceleration = Dense(1,activation='sigmoid')(h1)   
        Brake = Dense(1,activation='sigmoid')(h1)
        # V = merge([Steering,Acceleration,Brake],mode='concat')
        V = layers.concatenate([Steering,Acceleration,Brake])          
        model = Model(inputs=S,outputs=V)
        return model, model.trainable_weights, S
Project: Image-Caption-Generator    Author: abi-aryan    | Project source | File source
def create_model(config_dict,
                 compile_model=True):
    image_inputs = Input(shape=(4096,), name="image_model_input")
    image_model = _create_image_model(config_dict=config_dict,
                                      image_inputs=image_inputs)

    language_inputs = Input(shape=(config_dict['max_caption_length'],),
                            name="language_model_input")
    language_model = _create_language_model(config_dict=config_dict,
                                            language_inputs=language_inputs)

    merged_input = concatenate([image_model, language_model],
                               name="concatenate_image_language")
    merged_input = LSTM(1000,
                        return_sequences=False,
                        name="merged_model_lstm")(merged_input)
    softmax_output = Dense(units=config_dict["vocabulary_size"],
                           activation="softmax",
                           name="merged_model_softmax")(merged_input)
    model = Model(inputs=[image_inputs,
                          language_inputs], outputs=softmax_output)
    print(model.summary())
    if compile_model:
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    return model
Project: GlottGAN    Author: bajibabu    | Project source | File source
def load_test_data(data_dir, mean_vec, std_vec):
    files_list = os.listdir(data_dir)
    ac_data = None
    for f in files_list:
        fname, ext = os.path.splitext(f)
        if ext == '.lf0':
            print(fname)
            lf0_file = os.path.join(data_dir, f)
            gain_file = os.path.join(data_dir, fname + '.gain')
            lsf_file = os.path.join(data_dir, fname + '.lsf')
            slsf_file = os.path.join(data_dir, fname + '.slsf')
            hnr_file = os.path.join(data_dir, fname + '.hnr')
            lf0_data = read_binary_file(lf0_file, dim=1)
            lsf_data = read_binary_file(lsf_file, dim=30)
            slsf_data = read_binary_file(slsf_file, dim=10)
            hnr_data = read_binary_file(hnr_file, dim=5)
            gain_data = read_binary_file(gain_file, dim=1)
            print(lsf_data.shape, gain_data.shape, lf0_data.shape, hnr_data.shape, slsf_data.shape)
            # [lsf gain lf0 hnr slsf]
            data = np.concatenate(
                (lsf_data, gain_data, lf0_data, hnr_data, slsf_data), axis=1)
            print(data.shape)
            scaler = preprocessing.StandardScaler()
            scaler.mean_ = mean_vec
            scaler.scale_ = std_vec
            data = scaler.transform(data)
            out_file = os.path.join(data_dir, fname + '.cmp')
            with open(out_file, 'wb') as fid:
                data.tofile(fid)
    return ac_data
Project: Fabrik    Author: Cloud-CV    | Project source | File source
def test_keras_import(self):
        img_input = Input((224, 224, 3))
        model = Conv2D(64, (3, 3), padding='same')(img_input)
        model = concatenate([img_input, model])
        model = Model(img_input, model)
        self.keras_type_test(model, 0, 'Concat')
Project: Fabrik    Author: Cloud-CV    | Project source | File source
def concat(layer, layer_in, layerId):
    out = {layerId: concatenate(layer_in)}
    return out


# ********** Noise Layers **********
Project: Hotpot    Author: Liang-Qiu    | Project source | File source
def mlp_ptscorer(inputs, Ddim, N, l2reg, pfx='out', Dinit='glorot_uniform', sum_mode='sum', extra_inp=[]):
    """ Element-wise features from the pair fed to an MLP. """
    linear = Activation('linear')
    if sum_mode == 'absdiff':
        absdiff = Lambda(function=lambda x: K.abs(x[0] - x[1]),
                         output_shape=lambda shape: shape[0])
        # model.add_node(name=pfx+'sum', layer=absdiff_merge(model, inputs))
        mlp_inputs = absdiff(inputs)
    elif sum_mode == 'sum':
        outsum = linear(add(inputs))
        outmul = linear(multiply(inputs))
        mlp_inputs = [outsum, outmul] + extra_inp

    def mlp_args(mlp_inputs):
        """ return model.add_node() args that are good for mlp_inputs list
        of both length 1 and more than 1. """
        if isinstance(mlp_inputs, list):
            mlp_inputs = concatenate(mlp_inputs)
        return mlp_inputs

    # Ddim may be either 0 (no hidden layer), scalar (single hidden layer) or
    # list (multiple hidden layers)
    if Ddim == 0:
        mlp_inputs = mlp_args(mlp_inputs)
        Ddim = []
    elif not isinstance(Ddim, list):
        Ddim = [Ddim]
    if Ddim:
        for i, D in enumerate(Ddim):
            mlp_inputs = Dense(int(N*D), activation='tanh', kernel_initializer=Dinit, kernel_regularizer=l2(l2reg))(mlp_args(mlp_inputs))
            # model.add_node(name=pfx+'hdn[%d]'%(i,),
            #                layer=Dense(output_dim=int(N*D), W_regularizer=l2(l2reg), activation='tanh', init=Dinit),
            #                **mlp_args(mlp_inputs))
            # mlp_inputs = [pfx+'hdn[%d]'%(i,)]
    outmlp = Dense(1, kernel_regularizer=l2(l2reg))(mlp_inputs)
    return outmlp
Project: Hotpot    Author: Liang-Qiu    | Project source | File source
def mlp_ptscorer(inputs, Ddim, N, l2reg, pfx='out', Dinit='glorot_uniform', sum_mode='sum', extra_inp=[]):
    """ Element-wise features from the pair fed to an MLP. """
    linear = Activation('linear')
    if sum_mode == 'absdiff':
        absdiff = Lambda(function=lambda x: K.abs(x[0] - x[1]),
                         output_shape=lambda shape: shape[0])
        # model.add_node(name=pfx+'sum', layer=absdiff_merge(model, inputs))
        mlp_inputs = absdiff(inputs)
    elif sum_mode == 'sum':
        outsum = linear(add(inputs))
        outmul = linear(multiply(inputs))
        mlp_inputs = [outsum, outmul] + extra_inp

    def mlp_args(mlp_inputs):
        """ return model.add_node() args that are good for mlp_inputs list
        of both length 1 and more than 1. """
        if isinstance(mlp_inputs, list):
            mlp_inputs = concatenate(mlp_inputs)
        return mlp_inputs

    # Ddim may be either 0 (no hidden layer), scalar (single hidden layer) or
    # list (multiple hidden layers)
    if Ddim == 0:
        mlp_inputs = mlp_args(mlp_inputs)
        Ddim = []
    elif not isinstance(Ddim, list):
        Ddim = [Ddim]
    if Ddim:
        for i, D in enumerate(Ddim):
            mlp_inputs = Dense(int(N*D), activation='tanh', kernel_initializer=Dinit, kernel_regularizer=l2(l2reg))(mlp_args(mlp_inputs))
            # model.add_node(name=pfx+'hdn[%d]'%(i,),
            #                layer=Dense(output_dim=int(N*D), W_regularizer=l2(l2reg), activation='tanh', init=Dinit),
            #                **mlp_args(mlp_inputs))
            # mlp_inputs = [pfx+'hdn[%d]'%(i,)]
    outmlp = Dense(1, kernel_regularizer=l2(l2reg))(mlp_inputs)
    return outmlp
Project: aes-gated-word-char    Author: unkn0wnxx    | Project source | File source
def create_concat_model(self, emb_dim, emb_path, vocab_word,
                            vocab_word_size, word_maxlen, vocab_char_size,
                            char_maxlen):
        from aes.layers import Conv1DMask, MaxPooling1DMask
        from keras.layers import concatenate
        logger.info('Building concatenation model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        char_cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(char_emb)
        char_input = MaxPooling1DMask(
            pool_size=char_maxlen // word_maxlen, padding='same')(char_cnn)
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_input = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        merged = concatenate([char_input, word_input], axis=1)
        merged_dropped = Dropout(0.5)(merged)
        final_input = Dense(50)(merged_dropped)
        cnn = Conv1DMask(
            filters=emb_dim, kernel_size=3, padding='same')(final_input)
        dropped = Dropout(0.5)(cnn)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=[input_char, input_word], outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: NetworkCompress    Author: luzai    | Project source | File source
def inception_block(input_data, idx):
    tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu', name='incep_conv_1a_' + str(idx))(input_data)
    tower_1 = Conv2D(64 // 3, (3, 3), padding='same', activation='relu', name='incep_conv_1b_' + str(idx))(tower_1)

    tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu', name='incep_conv_2a_' + str(idx))(input_data)
    tower_2 = Conv2D(64 // 3, (5, 5), padding='same', activation='relu', name='incep_conv_2b_' + str(idx))(tower_2)

    tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='incep_conv_3a_' + str(idx))(input_data)
    tower_3 = Conv2D(64 // 3 + 1, (1, 1), padding='same', activation='relu', name='incep_conv_3b_' + str(idx))(tower_3)

    output = concatenate([tower_1, tower_2, tower_3], axis=3)

    return output
Project: Keras-GAN    Author: eriklindernoren    | Project source | File source
def save_imgs(self, epoch):
        r, c = 10, 10

        fig, axs = plt.subplots(r, c)
        for i in range(r):
            sampled_noise, sampled_labels, sampled_cont = self.sample_generator_input(c)
            gen_input = np.concatenate((sampled_noise, sampled_labels, sampled_cont), axis=1)
            gen_imgs = self.generator.predict(gen_input)
            gen_imgs = 0.5 * gen_imgs + 0.5
            for j in range(c):
                axs[i,j].imshow(gen_imgs[j,:,:,0], cmap='gray')
                axs[i,j].axis('off')
        fig.savefig("./infogan/images/mnist_%d.png" % epoch)
        plt.close()
Project: coremltools    Author: apple    | Project source | File source
def test_dangling_merge_left(self):

        x1 = Input(shape=(4,), name = 'input1')
        x2 = Input(shape=(5,), name = 'input2')
        y1 = Dense(6, name = 'dense')(x2)
        z = concatenate([x1, y1])
        model = Model(inputs = [x1,x2], outputs = [z])

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        self._test_keras_model(model)
Project: coremltools    Author: apple    | Project source | File source
def test_dangling_merge_right(self):

        x1 = Input(shape=(4,), name = 'input1')
        x2 = Input(shape=(5,), name = 'input2')
        y1 = Dense(6, name = 'dense')(x2)
        z = concatenate([y1, x1])
        model = Model(inputs = [x1,x2], outputs = [z])

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        self._test_keras_model(model)
Project: Jetson-RaceCar-AI    Author: ardamavi    | Project source | File source
def get_model():
    img_inputs = Input(shape=(500, 500, 1))
    lidar_inputs = Input(shape=(3,))

    conv_1 = Conv2D(32, (4,4), strides=(2,2))(img_inputs)

    conv_2 = Conv2D(32, (4,4), strides=(2,2))(conv_1)

    conv_3 = Conv2D(32, (3,3), strides=(1,1))(conv_2)
    act_3 = Activation('relu')(conv_3)

    pooling_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(act_3)

    flat_1 = Flatten()(pooling_1)

    fc = Dense(32)(flat_1)

    lidar_fc = Dense(32)(lidar_inputs)

    concatenate_layer = concatenate([fc, lidar_fc])

    fc = Dense(10)(concatenate_layer)
    fc = Activation('relu')(fc)
    fc = Dropout(0.5)(fc)

    outputs = Dense(2)(fc)

    outputs = Activation('sigmoid')(outputs)


    model = Model(inputs=[img_inputs, lidar_inputs], outputs=[outputs])

    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])

    print(model.summary())

    return model
Project: HyPRec    Author: mostafa-mahmoud    | Project source | File source
def get_cnn(self):
        """
        Build a keras' convolutional neural network model.

        :returns: A tuple of 2 models, for encoding and encoding+decoding model.
        :rtype: tuple(Model)
        """
        n_vocab = self.abstracts_preprocessor.get_num_vocab()
        n1 = 64
        input_layer = Input(shape=(n_vocab,))
        model = Reshape((1, n_vocab,))(input_layer)
        model = Convolution1D(n1, 3, padding='same', activation='sigmoid', kernel_regularizer=l2(.01))(model)
        model = Reshape((n1,))(model)
        model = Dense(n1, activation='sigmoid', kernel_regularizer=l2(.01))(model)
        model = Reshape((1, n1))(model)
        model = Convolution1D(self.n_factors, 3, padding='same',
                              activation='softmax', kernel_regularizer=l2(.01))(model)
        encoding = Reshape((self.n_factors,), name='encoding')(model)

        model = Reshape((1, self.n_factors))(encoding)
        model = Convolution1D(n1, 3, padding='same', activation='sigmoid', kernel_regularizer=l2(.01))(model)
        model = Reshape((n1,))(model)
        model = Dense(n1, activation='relu', kernel_regularizer=l2(.01))(model)
        model = Reshape((1, n1))(model)
        model = Convolution1D(n_vocab, 3, padding='same', kernel_regularizer=l2(.01))(model)
        decoding = Reshape((n_vocab,))(model)

        model = concatenate([encoding, decoding])
        self.model = Model(inputs=input_layer, outputs=model)
        self.model.compile(loss='mean_squared_error', optimizer='sgd')
Project: HyPRec    Author: mostafa-mahmoud    | Project source | File source
def train_sdae(self, X, y, std=0.25):
        """
        Train the stacked denoising autoencoders.

        :param ndarray X: input of the SDAE
        :param ndarray y: Target of the SDAE
        :param float std: The standard deviation of the noising of clean input.
        :returns: The loss of the training
        :rtype: float
        """
        return self.model.train_on_batch(X, numpy.concatenate((y, numpy.random.normal(X, std)), axis=1))
Project: HyPRec    Author: mostafa-mahmoud    | Project source | File source
def evaluate_sdae(self, X, y):
        """
        Compute the loss of the encoding of the stacked denoising autoencoders.

        :param ndarray X: input of the SDAE
        :param ndarray y: Target of the SDAE
        :returns: The encoded latent representation of X
        :rtype: ndarray
        """
        return self.model.evaluate(X, numpy.concatenate((y, X), axis=1))
Project: mtl    Author: zhenhongChen    | Project source | File source
def __call__(self, emb_layer):
        fw_lstm_out = self.forward_lstm(emb_layer)
        bw_lstm_out = self.backward_lstm(emb_layer)
        conv_out = self.conv_dropout(self.conv(emb_layer))

        return concatenate([fw_lstm_out, conv_out, bw_lstm_out], axis=2)
Project: mtl    Author: zhenhongChen    | Project source | File source
def local_handle(self, emb_layer):
        fw_lstm_out = self.forward_lstm(emb_layer)
        bw_lstm_out = self.backward_lstm(emb_layer)
        conv_out = self.conv_dropout(self.conv(emb_layer))

        return concatenate([fw_lstm_out, conv_out, bw_lstm_out], axis=2)
Project: Image-Caption-Generator    Author: shagunsodhani    | Project source | File source
def create_model(config_dict,
                 compile_model=True):
    image_inputs = Input(shape=(4096,), name="image_model_input")
    image_model = _create_image_model(config_dict=config_dict,
                                      image_inputs=image_inputs)

    language_inputs = Input(shape=(config_dict['max_caption_length'],),
                            name="language_model_input")
    language_model = _create_language_model(config_dict=config_dict,
                                            language_inputs=language_inputs)

    merged_input = concatenate([image_model, language_model],
                               name="concatenate_image_language")
    merged_input = LSTM(1000,
                        return_sequences=False,
                        name="merged_model_lstm")(merged_input)
    softmax_output = Dense(units=config_dict["vocabulary_size"],
                           activation="softmax",
                           name="merged_model_softmax")(merged_input)
    model = Model(inputs=[image_inputs,
                          language_inputs], outputs=softmax_output)
    print(model.summary())
    if compile_model:
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    return model
Project: Keras-DualPathNetworks    Author: titu1994    | Project source | File source
def _grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    for c in range(cardinality):
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    group_merge = BatchNormalization(axis=channel_axis)(group_merge)
    group_merge = Activation('relu')(group_merge)
    return group_merge
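A hedged usage sketch for the block above (tensor sizes illustrative): with cardinality=32 and grouped_channels=4, a 128-channel input is sliced into 32 groups of 4 channels, each convolved separately, then concatenated back on the channel axis.

inputs = Input(shape=(56, 56, 128))  # channels_last assumed
x = _grouped_convolution_block(inputs, grouped_channels=4, cardinality=32, strides=1)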
Project: tslearn    Author: rtavenar    | Project source | File source
def _set_model_layers(self, X, ts_sz, d, n_classes):
        inputs = [Input(shape=(ts_sz, 1), name="input_%d" % di) for di in range(d)]
        shapelet_sizes = sorted(self.n_shapelets_per_size.keys())
        pool_layers = []
        pool_layers_locations = []
        for i, sz in enumerate(sorted(shapelet_sizes)):
            transformer_layers = [Conv1D(filters=sz,
                                         kernel_size=sz,
                                         trainable=False,
                                         use_bias=False,
                                         name="false_conv_%d_%d" % (i, di))(inputs[di]) for di in range(d)]
            shapelet_layers = [LocalSquaredDistanceLayer(self.n_shapelets_per_size[sz],
                                                         X=X,
                                                         name="shapelets_%d_%d" % (i, di))(transformer_layers[di])
                               for di in range(d)]
            if d == 1:
                summed_shapelet_layer = shapelet_layers[0]
            else:
                summed_shapelet_layer = add(shapelet_layers)
            pool_layers.append(GlobalMinPooling1D(name="min_pooling_%d" % i)(summed_shapelet_layer))
            pool_layers_locations.append(GlobalArgminPooling1D(name="min_pooling_%d" % i)(summed_shapelet_layer))
        if len(shapelet_sizes) > 1:
            concatenated_features = concatenate(pool_layers)
            concatenated_locations = concatenate(pool_layers_locations)
        else:
            concatenated_features = pool_layers[0]
            concatenated_locations = pool_layers_locations[0]
        if self.weight_regularizer > 0.:
            outputs = Dense(units=n_classes,
                            activation="softmax",
                            kernel_regularizer=l2(self.weight_regularizer),
                            name="softmax")(concatenated_features)
        else:
            outputs = Dense(units=n_classes,
                            activation="softmax",
                            name="softmax")(concatenated_features)
        self.model = Model(inputs=inputs, outputs=outputs)
        self.transformer_model = Model(inputs=inputs, outputs=concatenated_features)
        self.locator_model = Model(inputs=inputs, outputs=concatenated_locations)
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_c = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_C,
                                    trainable=False)
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_context = embedding_layer_c(context)
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_c = Bidirectional(LSTM(60))(embedded_context)
        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
        relu_c_q = Dense(100, activation='relu')(concat_c_q)
        relu_c_q = Dropout(0.25)(relu_c_q)
        concat_c_q_a = concatenate([l_lstm_a, relu_c_q], axis = 1)

        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer, context], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_c = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_C,
                                    trainable=False)
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_context = embedding_layer_c(context)
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_c = Bidirectional(LSTM(60, return_sequences=True))(embedded_context)
        l_lstm_c = Bidirectional(LSTM(60))(l_lstm_c)
        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
        relu_c_q = Dense(100, activation='relu')(concat_c_q)
        relu_c_q = Dropout(0.25)(relu_c_q)
        concat_c_q_a = concatenate([l_lstm_a, relu_c_q], axis = 1)

        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer, context], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q_a = concatenate([l_lstm_a, l_lstm_q], axis = 1)

        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_c = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_C,
                                    trainable=False)
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_context = embedding_layer_c(context)
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_c = Bidirectional(LSTM(60))(embedded_context)
        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
        relu_c_q = Dense(100, activation='tanh')(concat_c_q)
        concat_c_a = concatenate([l_lstm_a, l_lstm_c], axis=1)
        relu_c_a = Dense(100, activation='tanh')(concat_c_a)
        relu_c_q = Dropout(0.5)(relu_c_q)
        relu_c_a = Dropout(0.5)(relu_c_a)
        concat_c_q_a = dot([relu_c_a, relu_c_q], axes=1, normalize=True)  # cosine similarity; Keras 2 equivalent of merge(mode='cos')
        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer, context], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: extkeras    Author: andhus    | Project source | File source
def attend_before_step(
        self,
        inputs,
        attended,
        attention_states_tm1,
        recurrent_states_tm1,
        recurrent_constants
    ):
        attention_h, attention_states = \
            self.attention_step(
                attended=attended,
                attention_states=attention_states_tm1,
                step_input=inputs,
                recurrent_states=recurrent_states_tm1,
            )
        if self.concatenate_input:
            recurrent_input = concatenate([attention_h, inputs])
        else:
            recurrent_input = attention_h

        output, recurrent_states = self.recurrent_layer.step(
            recurrent_input,
            recurrent_states_tm1 + recurrent_constants
        )

        if self.return_attention:
            output = concatenate([output, attention_h])

        return output, attention_states + recurrent_states
Project: extkeras    Author: andhus    | Project source | File source
def attend_after_step(
        self,
        inputs,
        attended,
        attention_states_tm1,
        recurrent_states_tm1,
        recurrent_constants
    ):
        attention_h_tm1 = attention_states_tm1[0]

        if self.concatenate_input:
            recurrent_input = concatenate([attention_h_tm1, inputs])
        else:
            recurrent_input = attention_h_tm1

        output, recurrent_states = self.recurrent_layer.step(
            recurrent_input,
            recurrent_states_tm1 + recurrent_constants
        )

        attention_h, attention_states = \
            self.attention_step(
                attended=attended,
                attention_states=attention_states_tm1,
                step_input=inputs,
                recurrent_states=recurrent_states
            )

        if self.return_attention:
            output = concatenate([output, attention_h])

        return output, attention_states + recurrent_states