Python keras.layers module: Concatenate() usage examples

The following 28 code examples were extracted from open-source Python projects to illustrate how to use keras.layers.Concatenate().
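
Before the project examples, here is a minimal, self-contained sketch of the layer in the Keras 2 functional API (the shapes are purely illustrative):

from keras.layers import Input, Dense, Concatenate
from keras.models import Model

in1 = Input(shape=(2,))
in2 = Input(shape=(3,))
# Concatenate joins its inputs along one axis (by default the last one),
# so the merged tensor has shape (batch, 2 + 3) = (batch, 5).
merged = Concatenate()([in1, in2])
out = Dense(1)(merged)
model = Model(inputs=[in1, in2], outputs=out)
model.summary()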

Project: adversarial-variational-bayes | Author: gdikov
def sample_standard_normal_noise(inputs, **kwargs):
    from keras.backend import shape, random_normal
    from keras.layers import Concatenate, Dense, Add
    n_samples = kwargs.get('n_samples', shape(inputs)[0])
    n_basis_noise_vectors = kwargs.get('n_basis', -1)
    data_dim = kwargs.get('data_dim', 1)
    noise_dim = kwargs.get('noise_dim', data_dim)
    seed = kwargs.get('seed', 7)

    if n_basis_noise_vectors > 0:
        samples_isotropic = random_normal(shape=(n_samples, n_basis_noise_vectors, noise_dim),
                                          mean=0, stddev=1, seed=seed)
    else:
        samples_isotropic = random_normal(shape=(n_samples, noise_dim),
                                          mean=0, stddev=1, seed=seed)
    op_mode = kwargs.get('mode', 'none')
    if op_mode == 'concatenate':
        concat = Concatenate(axis=1, name='enc_noise_concatenation')([inputs, samples_isotropic])
        return concat
    elif op_mode == 'add':
        resized_noise = Dense(data_dim, activation=None, name='enc_resized_noise_sampler')(samples_isotropic)
        added_noise_data = Add(name='enc_adding_noise_data')([inputs, resized_noise])
        return added_noise_data
    return samples_isotropic
Project: keras-rl | Author: matthiasplappert
def test_single_continuous_dqn_input():
    nb_actions = 2

    V_model = Sequential()
    V_model.add(Flatten(input_shape=(2, 3)))
    V_model.add(Dense(1))

    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(2, 3)))
    mu_model.add(Dense(nb_actions))

    L_input = Input(shape=(2, 3))
    L_input_action = Input(shape=(nb_actions,))
    x = Concatenate()([Flatten()(L_input), L_input_action])
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[L_input_action, L_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
Project: keras-rl | Author: matthiasplappert
def test_single_ddpg_input():
    nb_actions = 2

    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
Project: keras-inception-resnet-v2 | Author: myutwo150
def _generate_layer_name(name, branch_idx=None, prefix=None):
    """Utility function for generating layer names.

    If `prefix` is `None`, returns `None` to use default automatic layer names.
    Otherwise, the returned layer name is:
        - PREFIX_NAME if `branch_idx` is not given.
        - PREFIX_Branch_0_NAME if e.g. `branch_idx=0` is given.

    # Arguments
        name: base layer name string, e.g. `'Concatenate'` or `'Conv2d_1x1'`.
        branch_idx: an `int`. If given, will add e.g. `'Branch_0'`
            after `prefix` and in front of `name` in order to identify
            layers in the same block but in different branches.
        prefix: string prefix that will be added in front of `name` to make
            all layer names unique (e.g. which block this layer belongs to).

    # Returns
        The layer name.
    """
    if prefix is None:
        return None
    if branch_idx is None:
        return '_'.join((prefix, name))
    return '_'.join((prefix, 'Branch', str(branch_idx), name))
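
For illustration, the naming scheme above behaves as follows (the 'Block35_1' prefix is just an example value, not part of the function):

print(_generate_layer_name('Conv2d_1x1'))
# None  (Keras falls back to automatic layer names)
print(_generate_layer_name('Conv2d_1x1', prefix='Block35_1'))
# 'Block35_1_Conv2d_1x1'
print(_generate_layer_name('Conv2d_1x1', branch_idx=0, prefix='Block35_1'))
# 'Block35_1_Branch_0_Conv2d_1x1'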
Project: multi-gpu-keras-tf | Author: sallamander
def _parallelize_model(self, model, gpu_ids, batch_size):
        """Parallelize the model over the given gpu_ids

        Note: This is largely copied from:

        https://github.com/kuza55/keras-extras/blob/master/utils/multi_gpu.py

        :param model: Keras Model instance
        :param gpu_ids: list of integers to run the model on
        :param batch_size: int holding the batch size to use during training;
         if multiple gpus are passed in, one batch with batch_size will be run
         on each gpu

        TODO: Put what is returned here
        """

        all_sliced_outputs = []
        for gpu_id in gpu_ids:
            with tf.device('/gpu:{}'.format(gpu_id)):

                sliced_inputs = []
                # pylint flags idx_min/idx_max as cell variables defined in a
                # loop; that is fine here because the Lambda is called
                # immediately, within the same loop iteration.
                # pylint: disable=cell-var-from-loop
                for model_input in model.inputs:
                    idx_min = gpu_id * batch_size
                    idx_max = (gpu_id + 1) * batch_size
                    input_slice = Lambda(
                        lambda x: x[idx_min:idx_max],
                        lambda shape: shape
                    )(model_input)
                    sliced_inputs.append(input_slice)

                sliced_outputs = model(sliced_inputs)
                all_sliced_outputs.append(sliced_outputs)

        with tf.device('/cpu:0'):
            outputs = Concatenate(axis=0)(all_sliced_outputs)

            super().__init__(inputs=model.inputs, outputs=outputs)
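
The slicing arithmetic can be checked in isolation: with this scheme the parallelized model must be fed batches of len(gpu_ids) * batch_size samples, and each GPU sees one contiguous batch_size slice. A toy check with illustrative numbers:

import numpy as np

batch_size = 4
gpu_ids = [0, 1]
batch = np.arange(len(gpu_ids) * batch_size)  # stand-in for one input tensor

for gpu_id in gpu_ids:
    idx_min = gpu_id * batch_size
    idx_max = (gpu_id + 1) * batch_size
    print(gpu_id, batch[idx_min:idx_max])
# 0 [0 1 2 3]
# 1 [4 5 6 7]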
Project: auto-triage | Author: zhijian-liu
def create_model(FLAGS):
  input_a = Input(shape = (224, 224, 3))
  input_b = Input(shape = (224, 224, 3))

  if FLAGS.siamese == "share":
    extractor = feature_extractor(FLAGS)
    feature_a = extractor(input_a)
    feature_b = extractor(input_b)
  elif FLAGS.siamese == "separate":
    extractor_a = feature_extractor(FLAGS, "a")
    extractor_b = feature_extractor(FLAGS, "b")
    feature_a = extractor_a(input_a)
    feature_b = extractor_b(input_b)
  else:
    raise NotImplementedError

  if FLAGS.module == "subtract":
    feature = Lambda(lambda x: x[0] - x[1], output_shape = lambda s: s[0])([feature_a, feature_b])
  elif FLAGS.module == "bilinear":
    raise NotImplementedError
  elif FLAGS.module == "neural":
    feature = Concatenate()([feature_a, feature_b])
    feature = Dense(128, activation = FLAGS.activation)(feature)
  else:
    raise NotImplementedError

  hidden1 = Dropout(0.5)(Dense(128, activation = FLAGS.activation)(feature))
  hidden2 = Dense(128, activation = FLAGS.activation)(hidden1)
  output = Dense(2, activation = "softmax")(hidden2)

  model = Model([input_a, input_b], output)

  if FLAGS.regularizer == "l2":
    add_regularizer(model)
  elif FLAGS.regularizer != "none":
    raise NotImplementedError
  return model
Project: keras-squeezenet | Author: wohlert
def _fire(x, filters, name="fire"):
    sq_filters, ex1_filters, ex2_filters = filters
    squeeze = Convolution2D(sq_filters, (1, 1), activation='relu', padding='same', name=name + "/squeeze1x1")(x)
    expand1 = Convolution2D(ex1_filters, (1, 1), activation='relu', padding='same', name=name + "/expand1x1")(squeeze)
    expand2 = Convolution2D(ex2_filters, (3, 3), activation='relu', padding='same', name=name + "/expand3x3")(squeeze)
    x = Concatenate(axis=-1, name=name)([expand1, expand2])
    return x
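
A minimal sketch of how such fire modules are stacked, assuming a SqueezeNet-style stem (the filter counts follow the original paper's fire2/fire3 blocks):

from keras.layers import Input, Convolution2D, MaxPooling2D
from keras.models import Model

inputs = Input(shape=(224, 224, 3))
x = Convolution2D(96, (7, 7), strides=(2, 2), activation='relu', name='conv1')(inputs)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='maxpool1')(x)
x = _fire(x, filters=(16, 64, 64), name='fire2')  # squeeze to 16, expand to 64 + 64
x = _fire(x, filters=(16, 64, 64), name='fire3')
model = Model(inputs, x)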
Project: adversarial-variational-bayes | Author: gdikov
def synthetic_encoder(data_dim, noise_dim, latent_dim=2):
    data_input = Input(shape=(data_dim,), name='enc_internal_data_input')
    noise_input = Input(shape=(noise_dim,), name='enc_internal_noise_input')
    data_noise_concat = Concatenate(axis=1, name='enc_data_noise_concatenation')([data_input, noise_input])
    encoder_body = repeat_dense(data_noise_concat, n_layers=2, n_units=256, name_prefix='enc_body')
    latent_factors = Dense(latent_dim, activation=None, name='enc_latent')(encoder_body)
    latent_model = Model(inputs=[data_input, noise_input], outputs=latent_factors, name='enc_internal_model')
    return latent_model
Project: adversarial-variational-bayes | Author: gdikov
def mnist_encoder_simple(data_dim, noise_dim, latent_dim=8):
    data_input = Input(shape=(data_dim,), name='enc_internal_data_input')
    noise_input = Input(shape=(noise_dim,), name='enc_internal_noise_input')
    # center the input around 0
    # centered_data = Lambda(lambda x: 2 * x - 1, name='enc_centering_data_input')(data_input)
    # concat_input = Concatenate(axis=-1, name='enc_noise_data_concat')([centered_data, noise_input])
    enc_body = repeat_dense(data_input, n_layers=2, n_units=256, activation='relu', name_prefix='enc_body')
    enc_output = Dense(100, activation='relu', name='enc_dense_before_latent')(enc_body)
    enc_output = Dense(latent_dim, name='enc_latent_features')(enc_output)
    noise_resized = Dense(latent_dim, activation=None, name='enc_noise_resizing')(noise_input)
    enc_output = Add(name='enc_add_noise_data')([enc_output, noise_resized])
    latent_factors = Model(inputs=[data_input, noise_input], outputs=enc_output, name='enc_internal_model')

    return latent_factors
Project: keras-rl | Author: matthiasplappert
def test_clone_graph_model():
    in1 = Input(shape=(2,))
    in2 = Input(shape=(3,))
    x = Dense(8)(Concatenate()([in1, in2]))
    graph = Model([in1, in2], x)
    graph.compile(optimizer='sgd', loss='mse')

    clone = clone_model(graph)
    clone.compile(optimizer='sgd', loss='mse')

    ins = [np.random.random((4, 2)), np.random.random((4, 3))]
    y_pred_graph = graph.predict_on_batch(ins)
    y_pred_clone = clone.predict_on_batch(ins)
    assert y_pred_graph.shape == y_pred_clone.shape
    assert_allclose(y_pred_graph, y_pred_clone)
Project: keras-rl | Author: matthiasplappert
def test_multi_dqn_input():
    input1 = Input(shape=(2, 3))
    input2 = Input(shape=(2, 4))
    x = Concatenate()([input1, input2])
    x = Flatten()(x)
    x = Dense(2)(x)
    model = Model(inputs=[input1, input2], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    processor = MultiInputProcessor(nb_inputs=2)
    for double_dqn in (True, False):
        agent = DQNAgent(model, memory=memory, nb_actions=2, nb_steps_warmup=5, batch_size=4,
                         processor=processor, enable_double_dqn=double_dqn)
        agent.compile('sgd')
        agent.fit(MultiInputTestEnv([(3,), (4,)]), nb_steps=10)
Project: keras-rl | Author: matthiasplappert
def test_multi_continuous_dqn_input():
    nb_actions = 2

    V_input1 = Input(shape=(2, 3))
    V_input2 = Input(shape=(2, 4))
    x = Concatenate()([V_input1, V_input2])
    x = Flatten()(x)
    x = Dense(1)(x)
    V_model = Model(inputs=[V_input1, V_input2], outputs=x)

    mu_input1 = Input(shape=(2, 3))
    mu_input2 = Input(shape=(2, 4))
    x = Concatenate()([mu_input1, mu_input2])
    x = Flatten()(x)
    x = Dense(nb_actions)(x)
    mu_model = Model(inputs=[mu_input1, mu_input2], outputs=x)

    L_input1 = Input(shape=(2, 3))
    L_input2 = Input(shape=(2, 4))
    L_input_action = Input(shape=(nb_actions,))
    x = Concatenate()([L_input1, L_input2])
    x = Concatenate()([Flatten()(x), L_input_action])
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[L_input_action, L_input1, L_input2], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    processor = MultiInputProcessor(nb_inputs=2)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=5, batch_size=4, processor=processor)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv([(3,), (4,)]), nb_steps=10)
Project: keras-rl | Author: matthiasplappert
def test_multi_cem_input():
    input1 = Input(shape=(2, 3))
    input2 = Input(shape=(2, 4))
    x = Concatenate()([input1, input2])
    x = Flatten()(x)
    x = Dense(2)(x)
    model = Model(inputs=[input1, input2], outputs=x)

    memory = EpisodeParameterMemory(limit=10, window_length=2)
    processor = MultiInputProcessor(nb_inputs=2)
    agent = CEMAgent(model, memory=memory, nb_actions=2, nb_steps_warmup=5, batch_size=4,
                     processor=processor, train_interval=50)
    agent.compile()
    agent.fit(MultiInputTestEnv([(3,), (4,)]), nb_steps=100)
Project: keras-rl | Author: matthiasplappert
def test_cdqn():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    V_model = Sequential()
    V_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    V_model.add(Dense(16))
    V_model.add(Activation('relu'))
    V_model.add(Dense(1))

    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    mu_model.add(Dense(16))
    mu_model.add(Activation('relu'))
    mu_model.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=50, random_process=random_process,
                     gamma=.99, target_model_update=1e-3)
    agent.compile(Adam(lr=1e-3))

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history
Project: keras-rl | Author: matthiasplappert
def test_ddpg():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                      memory=memory, nb_steps_warmup_critic=50, nb_steps_warmup_actor=50,
                      random_process=random_process, gamma=.99, target_model_update=1e-3)
    agent.compile([Adam(lr=1e-3), Adam(lr=1e-3)])

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history
Project: kchar | Author: jarfo
def CNN(seq_length, length, input_size, feature_maps, kernels, x):

    concat_input = []
    for feature_map, kernel in zip(feature_maps, kernels):
        reduced_l = length - kernel + 1
        conv = Conv2D(feature_map, (1, kernel), activation='tanh', data_format="channels_last")(x)
        maxp = MaxPooling2D((1, reduced_l), data_format="channels_last")(conv)
        concat_input.append(maxp)

    x = Concatenate()(concat_input)
    x = Reshape((seq_length, sum(feature_maps)))(x)
    return x
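
For orientation, CNN expects a 4-D tensor of shape (batch, seq_length, word_length, char_dim) and returns one max-pooled feature vector per word; the sizes below are illustrative:

from keras.layers import Input

seq_length, word_length, char_dim = 35, 21, 15
feature_maps = [50, 100, 150]  # one feature-map count per kernel width
kernels = [1, 2, 3]

chars = Input(shape=(seq_length, word_length, char_dim))
x = CNN(seq_length, word_length, char_dim, feature_maps, kernels, chars)
# x has shape (batch, seq_length, sum(feature_maps)) == (batch, 35, 300)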
Project: CIAN | Author: yanghanxy
def CNN(seq_length, length, input_size, feature_maps, kernels, x):
    concat_input = []
    for feature_map, kernel in zip(feature_maps, kernels):
        reduced_l = length - kernel + 1
        conv = Conv2D(feature_map, (1, kernel), activation='tanh', data_format="channels_last")(x)
        maxp = MaxPooling2D((1, reduced_l), data_format="channels_last")(conv)
        concat_input.append(maxp)
    con = Concatenate()(concat_input)
    con = Reshape((seq_length, sum(feature_maps)))(con)
    return con
Project: DeepJet | Author: mstoye
def convolutional_model_deepcsv(Inputs,nclasses,nregclasses,dropoutRate=-1):

    cpf=Inputs[1]
    vtx=Inputs[2]

    cpf  = Convolution1D(64, 1, kernel_initializer='lecun_uniform',  activation='relu', name='cpf_conv0')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf  = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu', name='cpf_conv1')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf  = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu', name='cpf_conv2')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu' , name='cpf_conv3')(cpf)


    vtx = Convolution1D(64, 1, kernel_initializer='lecun_uniform',  activation='relu', name='vtx_conv0')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu', name='vtx_conv1')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu', name='vtx_conv2')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu', name='vtx_conv3')(vtx)

    cpf=Flatten()(cpf)
    vtx=Flatten()(vtx)

    x = Concatenate()( [Inputs[0],cpf,vtx ])

    x  = block_deepFlavourDense(x,dropoutRate)

    predictions = Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: DeepJet | Author: mstoye
def convolutional_model_broad(Inputs,nclasses,nregclasses,dropoutRate=-1):
    """
    reference 1x1 convolutional model for 'deepFlavour', as for DPS note
    """  

    cpf,npf,vtx = block_deepFlavourConvolutions(charged=Inputs[1],
                                                neutrals=Inputs[2],
                                                vertices=Inputs[3],
                                                dropoutRate=dropoutRate)


    cpf  = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)

    image = block_SchwartzImage(image=Inputs[4],dropoutRate=dropoutRate,active=False)

    x = Concatenate()( [Inputs[0],cpf,npf,vtx,image ])

    x  = block_deepFlavourDense(x,dropoutRate)

    predictions = Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: DeepJet | Author: mstoye
def convolutional_model_broad_map(Inputs,nclasses,nregclasses,dropoutRate=-1):
    """
    reference 1x1 convolutional model for 'deepFlavour'
    """  

    cpf,npf,vtx = block_deepFlavourConvolutions(charged=Inputs[1],
                                                neutrals=Inputs[2],
                                                vertices=Inputs[3],
                                                dropoutRate=dropoutRate)



    cpf  = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)

    image = block_SchwartzImage(image=Inputs[4],dropoutRate=dropoutRate)

    x = Concatenate()( [Inputs[0],cpf,npf,vtx,image ])

    x  = block_deepFlavourDense(x,dropoutRate)

    predictions = Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: DeepJet | Author: mstoye
def convolutional_model_broad_reg(Inputs,nclasses,nregclasses,dropoutRate=-1):
    """
    the inputs are really not working as they are. need a reshaping well before
    """  
    cpf,npf,vtx = block_deepFlavourConvolutions(charged=Inputs[1],
                                                neutrals=Inputs[2],
                                                vertices=Inputs[3],
                                                dropoutRate=dropoutRate)


    cpf  = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)

    x = Concatenate()( [Inputs[0],cpf,npf,vtx,Inputs[4] ])

    x  = block_deepFlavourDense(x,dropoutRate)


    predictions = [Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x),
                   Dense(nregclasses, activation='linear',kernel_initializer='ones',name='E_pred')(x)]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: DeepJet | Author: mstoye
def convolutional_model_ConvCSV(Inputs,nclasses,nregclasses,dropoutRate=0.25):
    """
    Inputs similar to 2016 training, but with covolutional layers on each track and sv
    """

    a  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(Inputs[1])
    a = Dropout(dropoutRate)(a)
    a  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(a)
    a = Dropout(dropoutRate)(a)
    a  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(a)
    a = Dropout(dropoutRate)(a)
    a=Flatten()(a)

    c  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(Inputs[2])
    c = Dropout(dropoutRate)(c)
    c  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(c)
    c = Dropout(dropoutRate)(c)
    c  = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(c)
    c = Dropout(dropoutRate)(c)
    c=Flatten()(c)

    x = Concatenate()( [Inputs[0],a,c] )

    x = Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x=  Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: keras-surgeon | Author: BenWhetton
def test_delete_channels_merge_concatenate(channel_index, data_format):
    # This should test that the output is the correct shape, so it is passed
    # into a Dense layer rather than a Conv layer; the downstream weighted
    # layer whose weights must be corrected is dense_1.
    # Create model
    if data_format == 'channels_first':
        axis = 1
    elif data_format == 'channels_last':
        axis = -1
    else:
        raise ValueError

    input_shape = list(random.randint(10, 20, size=3))
    input_1 = Input(shape=input_shape)
    input_2 = Input(shape=input_shape)
    x = Conv2D(3, [3, 3], data_format=data_format, name='conv_1')(input_1)
    y = Conv2D(3, [3, 3], data_format=data_format, name='conv_2')(input_2)
    x = Concatenate(axis=axis, name='cat_1')([x, y])
    x = Flatten()(x)
    main_output = Dense(5, name='dense_1')(x)
    model = Model(inputs=[input_1, input_2], outputs=main_output)
    old_w = model.get_layer('dense_1').get_weights()

    # Delete channels
    layer = model.get_layer('cat_1')
    del_layer = model.get_layer('conv_1')
    surgeon = Surgeon(model, copy=True)
    surgeon.add_job('delete_channels', del_layer, channels=channel_index)
    new_model = surgeon.operate()
    new_w = new_model.get_layer('dense_1').get_weights()

    # Calculate next layer's correct weights
    flat_sz = np.prod(layer.get_output_shape_at(0)[1:])
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    if data_format == 'channels_first':
        delete_indices = [x * flat_sz // 2 // channel_count + i for x in
                          channel_index
                          for i in range(0, flat_sz // 2 // channel_count, )]
    elif data_format == 'channels_last':
        delete_indices = [x + i for i in range(0, flat_sz, channel_count*2)
                          for x in channel_index]
    else:
        raise ValueError
    correct_w = model.get_layer('dense_1').get_weights()
    correct_w[0] = np.delete(correct_w[0], delete_indices, axis=0)

    assert weights_equal(correct_w, new_w)
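
A worked instance of the channels_last index arithmetic above, with illustrative numbers: two 3-channel branches are concatenated (channel_count = 3, 6 channels total) and flat_sz = 24, i.e. 4 spatial positions. Deleting channel 1 of conv_1 then removes the second channel at every spatial position:

flat_sz, channel_count, channel_index = 24, 3, [1]
delete_indices = [x + i for i in range(0, flat_sz, channel_count * 2)
                  for x in channel_index]
print(delete_indices)  # [1, 7, 13, 19]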
Project: kchar | Author: jarfo
def LSTMCNN(opt):
    # opt.seq_length = number of time steps (words) in each batch
    # opt.rnn_size = dimensionality of hidden layers
    # opt.num_layers = number of layers
    # opt.dropout = dropout probability
    # opt.word_vocab_size = num words in the vocab
    # opt.word_vec_size = dimensionality of word embeddings
    # opt.char_vocab_size = num chars in the character vocab
    # opt.char_vec_size = dimensionality of char embeddings
    # opt.feature_maps = table of feature map sizes for each kernel width
    # opt.kernels = table of kernel widths
    # opt.max_word_l = max length of a word
    # opt.use_words = 1 to use word embeddings, 0 otherwise
    # opt.use_chars = 1 to use char embeddings, 0 otherwise
    # opt.highway_layers = number of highway layers to use, if any
    # opt.batch_size = number of sequences in each batch
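    # opt.batch_norm = 1 to apply batch normalization to the embeddings
    # opt.learning_rate = learning rate for the sSGD optimizer
    # opt.max_grad_norm = gradient-clipping norm passed to the optimizer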

    if opt.use_words:
        word = Input(batch_shape=(opt.batch_size, opt.seq_length), dtype='int32', name='word')
        word_vecs = Embedding(opt.word_vocab_size, opt.word_vec_size, input_length=opt.seq_length)(word)

    if opt.use_chars:
        chars = Input(batch_shape=(opt.batch_size, opt.seq_length, opt.max_word_l), dtype='int32', name='chars')
        chars_embedding = TimeDistributed(Embedding(opt.char_vocab_size, opt.char_vec_size, name='chars_embedding'))(chars)
        cnn = CNN(opt.seq_length, opt.max_word_l, opt.char_vec_size, opt.feature_maps, opt.kernels, chars_embedding)
        if opt.use_words:
            x = Concatenate()([cnn, word_vecs])
            inputs = [chars, word]
        else:
            x = cnn
            inputs = chars
    else:
        x = word_vecs
        inputs = word

    if opt.batch_norm:
        x = BatchNormalization()(x)

    for l in range(opt.highway_layers):
        x = TimeDistributed(Highway(activation='relu'))(x)

    for l in range(opt.num_layers):
        x = LSTM(opt.rnn_size, activation='tanh', recurrent_activation='sigmoid', return_sequences=True, stateful=True)(x)

        if opt.dropout > 0:
            x = Dropout(opt.dropout)(x)

    output = TimeDistributed(Dense(opt.word_vocab_size, activation='softmax'))(x)

    model = sModel(inputs=inputs, outputs=output)
    model.summary()

    optimizer = sSGD(lr=opt.learning_rate, clipnorm=opt.max_grad_norm, scale=float(opt.seq_length))
    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)

    return model
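
A hypothetical configuration object for LSTMCNN with illustrative values; the field names mirror the comment block above, and sModel, sSGD, Highway and CNN must be available from the kchar project:

from types import SimpleNamespace

opt = SimpleNamespace(
    batch_size=20, seq_length=35, rnn_size=650, num_layers=2, dropout=0.5,
    word_vocab_size=10000, word_vec_size=650,
    char_vocab_size=51, char_vec_size=15, max_word_l=21,
    feature_maps=[50, 100, 150, 200, 200, 200, 200],
    kernels=[1, 2, 3, 4, 5, 6, 7],
    use_words=0, use_chars=1, highway_layers=2,
    batch_norm=0, learning_rate=1.0, max_grad_norm=5,
)
model = LSTMCNN(opt)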
Project: Question-Answering-NNs | Author: nbogdan
def __init__(self, word_index, embedding_matrix):
        embedding_layer_q = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_Q,
                                    trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_A,
                                    trainable=False)

        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        conv_blocksA = []
        conv_blocksQ = []
        for sz in [3,5]:
            conv = Convolution1D(filters=20,
                                 kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(embedded_answer)
            conv = MaxPooling1D(pool_size=2)(conv)
            conv = Flatten()(conv)
            conv_blocksA.append(conv)
        for sz in [5,7, 9]:
            conv = Convolution1D(filters=20,
                                 kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(embedded_question)
            conv = MaxPooling1D(pool_size=3)(conv)
            conv = Flatten()(conv)
            conv_blocksQ.append(conv)

        z = Concatenate()(conv_blocksA + conv_blocksQ)
        z = Dropout(0.5)(z)
        z = Dense(100, activation="relu")(z)
        softmax_c_q = Dense(2, activation='softmax')(z)
        self.model = Model([question, answer], softmax_c_q)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
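
A hypothetical instantiation of this model wrapper; the class name is not shown in the excerpt and is assumed here, as are the EMBEDDING_DIM and MAX_SEQUENCE_LENGTH_* module constants:

import numpy as np

word_index = {'what': 1, 'is': 2, 'keras': 3}
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
qa = QAModel(word_index, embedding_matrix)  # class name assumed for illustration
qa.model.summary()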
Project: DeepJet | Author: mstoye
def model_deepFlavourNoNeutralReference(Inputs,nclasses,nregclasses,dropoutRate=0.1):
    """
    reference 1x1 convolutional model for 'deepFlavour'
    with recurrent layers and batch normalisation
    standard dropout rate it 0.1
    should be trained for flavour prediction first. afterwards, all layers can be fixed
    that do not include 'regression' and the training can be repeated focusing on the regression part
    (check function fixLayersContaining with invert=True)
    """  
    globalvars = BatchNormalization(momentum=0.6,name='globals_input_batchnorm') (Inputs[0])
    cpf    =     BatchNormalization(momentum=0.6,name='cpf_input_batchnorm')     (Inputs[1])
    vtx    =     BatchNormalization(momentum=0.6,name='vtx_input_batchnorm')     (Inputs[2])
    ptreginput = BatchNormalization(momentum=0.6,name='reg_input_batchnorm')     (Inputs[3])

    cpf, vtx = block_deepFlavourBTVConvolutions(
        charged=cpf,
        vertices=vtx,
        dropoutRate=dropoutRate,
        active=True,
        batchnorm=True
        )


    #
    cpf  = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
    cpf=BatchNormalization(momentum=0.6,name='cpflstm_batchnorm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
    vtx=BatchNormalization(momentum=0.6,name='vtxlstm_batchnorm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)


    x = Concatenate()( [globalvars,cpf,vtx ])

    x = block_deepFlavourDense(x,dropoutRate,active=True,batchnorm=True,batchmomentum=0.6)

    flavour_pred=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)

    reg = Concatenate()( [flavour_pred, ptreginput ] ) 

    reg_pred=Dense(nregclasses, activation='linear',kernel_initializer='ones',name='regression_pred',trainable=True)(reg)

    predictions = [flavour_pred,reg_pred]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: DeepJet | Author: mstoye
def model_deepFlavourReference(Inputs,nclasses,nregclasses,dropoutRate=0.1,momentum=0.6):
    """
    reference 1x1 convolutional model for 'deepFlavour'
    with recurrent layers and batch normalisation
    standard dropout rate it 0.1
    should be trained for flavour prediction first. afterwards, all layers can be fixed
    that do not include 'regression' and the training can be repeated focusing on the regression part
    (check function fixLayersContaining with invert=True)
    """  
    globalvars = BatchNormalization(momentum=momentum,name='globals_input_batchnorm') (Inputs[0])
    cpf    =     BatchNormalization(momentum=momentum,name='cpf_input_batchnorm')     (Inputs[1])
    npf    =     BatchNormalization(momentum=momentum,name='npf_input_batchnorm')     (Inputs[2])
    vtx    =     BatchNormalization(momentum=momentum,name='vtx_input_batchnorm')     (Inputs[3])
    ptreginput = BatchNormalization(momentum=momentum,name='reg_input_batchnorm')     (Inputs[4])

    cpf,npf,vtx = block_deepFlavourConvolutions(charged=cpf,
                                                neutrals=npf,
                                                vertices=vtx,
                                                dropoutRate=dropoutRate,
                                                active=True,
                                                batchnorm=True, batchmomentum=momentum)


    #
    cpf  = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
    cpf=BatchNormalization(momentum=momentum,name='cpflstm_batchnorm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf)
    npf=BatchNormalization(momentum=momentum,name='npflstm_batchnorm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
    vtx=BatchNormalization(momentum=momentum,name='vtxlstm_batchnorm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)


    x = Concatenate()( [globalvars,cpf,npf,vtx ])

    x = block_deepFlavourDense(x,dropoutRate,active=True,batchnorm=True,batchmomentum=momentum)

    flavour_pred=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)

    reg = Concatenate()( [flavour_pred, ptreginput ] ) 

    reg_pred=Dense(nregclasses, activation='linear',kernel_initializer='ones',name='regression_pred',trainable=True)(reg)

    predictions = [flavour_pred,reg_pred]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
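
A minimal sketch of the two-stage training the docstring describes, assuming model was built by model_deepFlavourReference and already trained for flavour prediction (the DeepJet repo provides fixLayersContaining for this; the loop below is an illustrative stand-in):

# Freeze every layer whose name does not contain 'regression', then
# recompile and continue training on the regression target alone.
for layer in model.layers:
    if 'regression' not in layer.name:
        layer.trainable = False
model.compile(optimizer='adam',
              loss=['categorical_crossentropy', 'mean_squared_error'])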
Project: DeepJet | Author: mstoye
def convolutional_model_lessbroad(Inputs,nclasses,nregclasses,dropoutRate=-1):
    """
    the inputs are really not working as they are. need a reshaping well before
    """

    #gl = Dense(8, activation='relu',kernel_initializer='lecun_uniform',input_shape=Inputshapes[0])(Inputs[0])
    #gl = Dense(8, activation='relu',kernel_initializer='lecun_uniform')(gl)
    #gl = Dense(8, activation='relu',kernel_initializer='lecun_uniform')(gl)


    cpf  = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu')(Inputs[1])
    cpf = Dropout(dropoutRate)(cpf)
    cpf  = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf  = Convolution1D(16, 1, kernel_initializer='lecun_uniform',  activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Flatten()(cpf)


    npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform',  activation='relu')(Inputs[2])
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(8, 1, kernel_initializer='lecun_uniform',  activation='relu')(npf)
    npf = Dropout(dropoutRate)(npf)
    npf = Flatten()(npf)

    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu')(Inputs[3])
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform',  activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(16, 1, kernel_initializer='lecun_uniform',  activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Flatten()(vtx)

    x = Concatenate()( [Inputs[0],cpf,npf,vtx ] )
    x = Dropout(dropoutRate)(x)

    x=  Dense(600, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x=  Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x=  Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x=  Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x=  Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x=  Dense(100, activation='relu',kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model