Python keras.backend module: relu() code examples

We extracted the following 44 code examples from open-source Python projects to illustrate how to use keras.backend.relu().
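Before the examples, here is a minimal sketch (assuming the Keras 2.x API with a TensorFlow backend) of the signature they all rely on: K.relu(x, alpha=0.0, max_value=None) computes element-wise max(x, 0), with an optional leaky slope alpha for negative inputs and an optional saturation ceiling max_value.

import numpy as np
from keras import backend as K

x = K.constant(np.array([-2.0, -0.5, 0.0, 1.0, 8.0]))
plain = K.relu(x)                  # [0., 0., 0., 1., 8.]
leaky = K.relu(x, alpha=0.2)       # [-0.4, -0.1, 0., 1., 8.]
capped = K.relu(x, max_value=6.0)  # [0., 0., 0., 1., 6.]
print(K.eval(plain), K.eval(leaky), K.eval(capped))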

Project: Deep-Learning-with-Keras    Author: PacktPublishing
def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    H = Convolution2D(int(nch / 2), 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V)
Project: nn_playground    Author: DingKe
def call(self, inputs):
        # Squeeze: global average pool over the spatial dimensions
        if self.data_format == 'channels_first':
            sq = K.mean(inputs, [2, 3])
        else:
            sq = K.mean(inputs, [1, 2])

        # Excitation, first stage: bottleneck dense projection with ReLU
        ex = K.dot(sq, self.kernel1)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias1)
        ex = K.relu(ex)

        # Excitation, second stage: dense projection with a sigmoid gate per channel
        ex = K.dot(ex, self.kernel2)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias2)
        ex = K.sigmoid(ex)

        # Broadcast the per-channel gate back over the spatial dimensions
        if self.data_format == 'channels_first':
            ex = K.expand_dims(ex, -1)
            ex = K.expand_dims(ex, -1)
        else:
            ex = K.expand_dims(ex, 1)
            ex = K.expand_dims(ex, 1)

        # Rescale the input feature maps channel-wise
        return inputs * ex
Project: cloudml-samples    Author: GoogleCloudPlatform
def model_fn(input_dim,
             labels_dim,
             hidden_units=[100, 70, 50, 20],
             learning_rate=0.1):
  """Create a Keras Sequential model with layers."""
  model = models.Sequential()

  for units in hidden_units:
    model.add(layers.Dense(units=units,
                           input_dim=input_dim,
                           activation=relu))
    input_dim = units

  # Add a final dense layer with a sigmoid activation
  model.add(layers.Dense(labels_dim, activation=sigmoid))
  compile_model(model, learning_rate)
  return model
Project: reactionrnn    Author: minimaxir
def reactionrnn_model(weights_path, num_classes, maxlen=140):
    '''
    Builds the model architecture for reactionrnn and
    loads the pretrained weights for the model.
    '''

    input = Input(shape=(maxlen,), name='input')
    embedded = Embedding(num_classes, 100, input_length=maxlen,
                         name='embedding')(input)
    rnn = GRU(256, return_sequences=False, name='rnn')(embedded)
    output = Dense(5, name='output',
                   activation=lambda x: K.relu(x) / K.sum(K.relu(x),
                                                          axis=-1))(rnn)

    model = Model(inputs=[input], outputs=[output])
    model.load_weights(weights_path, by_name=True)
    model.compile(loss='mse', optimizer='nadam')
    return model
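What the lambda activation above computes, shown on a single toy vector (this evaluation sketch is assumed, not taken from the reactionrnn source): ReLU zeroes the negative logits and the division normalizes the remaining values to sum to 1, giving a sparse, softmax-like output over the five classes.

import numpy as np
from keras import backend as K

z = K.constant(np.array([[1.0, -2.0, 3.0, 0.0, -1.0]]))
probs = K.relu(z) / K.sum(K.relu(z), axis=-1)
print(K.eval(probs))  # [[0.25 0.   0.75 0.   0.  ]]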
Project: auckland-ai-meetup-x-triage    Author: a-i-joe
def get_gradcam(image, model, layer_name, mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])  # spatial global avg pool
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image, 0])[0]
Project: ppap    Author: unique-horn
def get_output(self, x):
        """
        Generate filters for given input
        """

        # Assuming 'th' ordering
        # Input shape (batch, channels, rows, columns)
        # Output shape (batch, filter_size ** 2, rows, columns)

        # Use input to generate filter
        # (batch, 15, rows, columns)
        output = K.relu(K.conv2d(x, self.kernel1, border_mode="same"))

        # (batch, rows, columns, 15)
        output = K.permute_dimensions(output, (0, 2, 3, 1))

        # (batch, rows, columns, 20)
        # output = K.tanh(K.dot(output, self.w1) + self.b1)
        # (batch, rows, columns, fs**2)
        output = K.tanh(K.dot(output, self.w2) + self.b2)

        # (batch, fs**2, rows, columns)
        output = K.permute_dimensions(output, (0, 3, 1, 2))

        return output
Project: deeplearning_keras    Author: gazzola
def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
Project: deeplearning_keras    Author: gazzola
def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    H = Convolution2D(int(nch / 2), 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, 5, 5, subsample=(2, 2), border_mode='same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V)
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def leaky_relu(x):
    return K.relu(x, 0.2)
Project: deep-learning-models    Author: fchollet
def relu6(x):
    return K.relu(x, max_value=6)
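A hypothetical usage sketch (the layer sizes are illustrative, not from the project): a helper like relu6 is passed directly as a custom activation, and must be supplied again via custom_objects when a saved model that uses it is reloaded, e.g. load_model(path, custom_objects={'relu6': relu6}).

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, input_dim=10, activation=relu6))  # relu6 as defined above
model.add(Dense(1))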
Project: keras    Author: GeekLiB
def call(self, x, mask=None):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        pos = K.relu(x)
        neg = K.relu(-x)
        return K.concatenate([pos, neg], axis=1)

# global parameters
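A minimal numeric sketch (assumed, mirroring the standard Keras "antirectifier" example) of what the call() above produces: after centering and L2-normalizing, the positive and negative parts are rectified separately and concatenated, so the feature axis doubles in size.

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1.0, -2.0, 3.0]]))
x = x - K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
out = K.concatenate([K.relu(x), K.relu(-x)], axis=1)
print(K.eval(out).shape)  # (1, 6): the feature dimension has doubled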
Project: keras    Author: GeekLiB
def test_activation():
    # with string argument
    layer_test(core.Activation,
               kwargs={'activation': 'relu'},
               input_shape=(3, 2))

    # with function argument
    layer_test(core.Activation,
               kwargs={'activation': K.relu},
               input_shape=(3, 2))
Project: MobileNetworks    Author: titu1994
def relu6(x):
    return K.relu(x, max_value=6)
Project: pCVR    Author: xjtushilei
def call(self, inputs):
        inputs -= K.mean(inputs, axis=1, keepdims=True)
        inputs = K.l2_normalize(inputs, axis=1)
        pos = K.relu(inputs)
        neg = K.relu(-inputs)
        return K.concatenate([pos, neg], axis=1)

# global parameters
Project: auckland-ai-meetup-x-triage    Author: a-i-joe
def get_gradcam(image, model, layer_name):
    # remove dropout/noise layers
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(model.output)
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input], [heatmap])
    return fetch_heatmap([image])[0]
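A hypothetical end-to-end call of get_gradcam (the toy model, layer name, and image size are illustrative, not from the original project; this assumes a TF1-era Keras/TensorFlow setup with channels_last data, and that get_gradcam's module has numpy as np, tensorflow as tf, and keras.backend as K imported):

import numpy as np
from keras.models import Model
from keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense

# A tiny stand-in classifier with a named conv layer to take the Grad-CAM from
inp = Input(shape=(64, 64, 1))
feat = Conv2D(8, (3, 3), activation='relu', name='last_conv')(inp)
out = Dense(1, activation='sigmoid')(GlobalAveragePooling2D()(feat))
toy_model = Model(inp, out)

image = np.random.rand(64, 64, 1).astype('float32')
heatmap = get_gradcam(image, toy_model, 'last_conv')
print(heatmap.shape)  # (1, 62, 62, 1): one map at the conv layer's spatial resolution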
Project: ppap    Author: unique-horn
def get_output(self, x):
        """
        Generate filters for given input
        """

        # Assuming 'th' ordering
        # Input shape (batch, channels, rows, columns)
        # Output shape (batch, filter_size ** 2, rows, columns)

        # Use input to generate filter
        # (batch, 10, rows, columns)
        output = K.relu(K.conv2d(x, self.kernel1, border_mode="same"))

        # (batch, 15, rows, columns)
        output = K.concatenate([output, self.coordinates], axis=1)

        # (batch, rows, columns, 15)
        output = K.permute_dimensions(output, (0, 2, 3, 1))

        # (batch, rows, columns, 20)
        # output = K.tanh(K.dot(output, self.w1) + self.b1)
        # (batch, rows, columns, fs**2)
        output = K.tanh(K.dot(output, self.w2) + self.b2)

        # (batch, fs**2, rows, columns)
        output = K.permute_dimensions(output, (0, 3, 1, 2))

        return output
Project: kfs    Author: the-moliver
def call(self, inputs):
        x = K.square(inputs)
        kernel = K.relu(self.kernel)
        bias = K.relu(self.bias)
        if self.rank == 1:
            outputs = K.conv1d(
                x,
                kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                x,
                kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                x,
                kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        outputs = K.bias_add(
            outputs,
            bias,
            data_format=self.data_format)

        outputs = K.sqrt(outputs + K.epsilon())
        return inputs / outputs
Project: LIE    Author: EmbraceLife
def relu_limited(x, alpha=0., max_value=1.):
    return K.relu(x, alpha=alpha, max_value=max_value)
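A quick numeric check (assumed, not from the project) of the clipping behaviour: relu_limited keeps its output in [0, max_value].

import numpy as np
from keras import backend as K
print(K.eval(relu_limited(K.constant(np.array([-1.0, 0.3, 2.5])))))  # [0.  0.3 1. ]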
Project: LIE    Author: EmbraceLife
def relu_limited(x, alpha=0., max_value=1.):
    return K.relu(x, alpha=alpha, max_value=max_value)
Project: LIE    Author: EmbraceLife
def relu_limited(x, alpha=0., max_value=1.): # won't be used here
    return K.relu(x, alpha=alpha, max_value=max_value)
Project: sdp    Author: tansey
def create_model(model, dataset, inputdir=None, variable_scope='pixelcnnpp-', **kwargs):
    with tf.variable_scope(variable_scope):
        X = tf.placeholder(tf.float32, [None, dataset.nfeatures], name='X')

        input_layer = X
        input_layer_size = dataset.nfeatures

        # Add some non-linearities
        input_layer = Dense(128, W_regularizer=l2(0.01), activation=K.relu)(input_layer)
        input_layer = Dropout(0.5)(input_layer)
        input_layer_size = 128
        input_layer = Dense(64, W_regularizer=l2(0.01), activation=K.relu)(input_layer)
        input_layer = Dropout(0.5)(input_layer)
        input_layer_size = 64

        if model == 'multinomial':
            dist_model = MultinomialLayer(input_layer, input_layer_size, dataset.nlabels, **kwargs)
        elif model == 'gmm':
            dist_model = DiscreteParametricMixtureLayer(input_layer, input_layer_size, dataset.nlabels, one_hot=False, **kwargs)
        elif model == 'lmm':
            dist_model = DiscreteLogisticMixtureLayer(input_layer, input_layer_size, dataset.nlabels, one_hot=False, **kwargs)
        elif model == 'sdp':
            dist_model = LocallySmoothedMultiscaleLayer(input_layer, input_layer_size, dataset.nlabels, one_hot=False, **kwargs)
        elif model == 'fast-sdp':
            dist_model = ScalableLocallySmoothedMultiscaleLayer(input_layer, input_layer_size, dataset.nlabels, one_hot=False, **kwargs)
        else:
            raise Exception('Unknown model type: {0}'.format(model))

        return Model(dist_model, x=X, density=dist_model.density, labels=dist_model.labels,
                       train_loss=dist_model.train_loss, test_loss=dist_model.test_loss)
Project: sdp    Author: tansey
def create_model(model, x_shape, y_shape, variable_scope='pixels-', dimsize=256, **kwargs):
    with tf.variable_scope(variable_scope):
        X_image = tf.placeholder(tf.float32, [None] + list(x_shape[1:]), name='X')
        conv1 = Convolution2D(32, 3, 3, border_mode='same', activation=K.relu, W_regularizer=l2(0.01),
                                        input_shape=x_shape[1:])(X_image)
        pool1 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv1)
        drop1 = Dropout(0.5)(pool1)
        conv2 = Convolution2D(64, 5, 5, border_mode='same', activation=K.relu, W_regularizer=l2(0.01))(drop1)
        pool2 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv2)
        drop2 = Dropout(0.5)(pool2)
        drop2_flat = tf.reshape(drop2, [-1, 3*3*64])
        hidden1 = Dense(1024, W_regularizer=l2(0.01), activation=K.relu)(drop2_flat)
        drop_h1 = Dropout(0.5)(hidden1)
        hidden2 = Dense(128, W_regularizer=l2(0.01), activation=K.relu)(drop_h1)
        drop_h2 = Dropout(0.5)(hidden2)
        hidden3 = Dense(32, W_regularizer=l2(0.01), activation=K.relu)(drop_h2)
        drop_h3 = Dropout(0.5)(hidden3)

        num_classes = tuple([dimsize]*y_shape[1])
        print(num_classes)
        if model == 'multinomial':
            dist_model = MultinomialLayer(drop_h3, 32, num_classes, **kwargs)
        elif model == 'gmm':
            dist_model = DiscreteParametricMixtureLayer(drop_h3, 32, num_classes, **kwargs)
        elif model == 'lmm':
            dist_model = DiscreteLogisticMixtureLayer(drop_h3, 32, num_classes, **kwargs)
        elif model == 'sdp':
            dist_model = LocallySmoothedMultiscaleLayer(drop_h3, 32, num_classes, **kwargs)
        else:
            raise Exception('Unknown model type: {0}'.format(model))

        return X_image, dist_model
Project: sdp    Author: tansey
def create_model(model, dataset, inputdir='experiments/uci/data', variable_scope='uci-', **kwargs):
    with tf.variable_scope(variable_scope):
        x_shape, num_classes, nsamples = dataset_details(dataset, inputdir)
        X = tf.placeholder(tf.float32, [None, x_shape], name='X')
        layer_sizes = [256, 128, 64]
        hidden1 = Dense(layer_sizes[0], W_regularizer=l2(0.01), activation=K.relu)(X)
        drop_h1 = Dropout(0.5)(hidden1)
        hidden2 = Dense(layer_sizes[1], W_regularizer=l2(0.01), activation=K.relu)(drop_h1)
        drop_h2 = Dropout(0.5)(hidden2)
        hidden3 = Dense(layer_sizes[2], W_regularizer=l2(0.01), activation=K.relu)(drop_h2)
        drop_h3 = Dropout(0.5)(hidden3)

        print(num_classes)
        if model == 'multinomial':
            dist_model = MultinomialLayer(drop_h3, layer_sizes[-1], num_classes, **kwargs)
        elif model == 'gmm':
            dist_model = DiscreteParametricMixtureLayer(drop_h3, layer_sizes[-1], num_classes, one_hot=False, **kwargs)
        elif model == 'lmm':
            dist_model = DiscreteLogisticMixtureLayer(drop_h3, layer_sizes[-1], num_classes, one_hot=False, **kwargs)
        elif model == 'sdp':
            dist_model = LocallySmoothedMultiscaleLayer(drop_h3, layer_sizes[-1], num_classes, one_hot=False, **kwargs)
        elif model == 'fast-sdp':
            dist_model = ScalableLocallySmoothedMultiscaleLayer(drop_h3, layer_sizes[-1], num_classes, one_hot=False, **kwargs)
        else:
            raise Exception('Unknown model type: {0}'.format(model))

        return Model(dist_model, x=X, density=dist_model.density, labels=dist_model.labels,
                       train_loss=dist_model.train_loss, test_loss=dist_model.test_loss)
Project: sdp    Author: tansey
def neural_network(self, X):
    """pi, mu, sigma = NN(x; theta)"""
    X_image = tf.reshape(X, [-1,IMAGE_ROWS,IMAGE_COLS,1])
    conv1 = Convolution2D(32, 5, 5, border_mode='same', activation=K.relu, W_regularizer=l2(0.01),
                          input_shape=(IMAGE_ROWS, IMAGE_COLS, 1))(X_image)
    pool1 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv1)
    conv2 = Convolution2D(64, 5, 5, border_mode='same', activation=K.relu, W_regularizer=l2(0.01))(pool1)
    pool2 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv2)
    pool2_flat = tf.reshape(pool2, [-1, IMAGE_ROWS//4 * IMAGE_COLS//4 * 64])
    hidden1 = Dense(1024, W_regularizer=l2(0.01), activation=K.relu)(pool2_flat)
    hidden2 = Dense(64, W_regularizer=l2(0.01), activation=K.relu)(hidden1)
    self.mus = Dense(self.K)(hidden2)
    self.sigmas = Dense(self.K, activation=K.softplus)(hidden2)
    self.pi = Dense(self.K, activation=K.softmax)(hidden2)
Project: keras-customized    Author: ambrite
def call(self, x, mask=None):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        pos = K.relu(x)
        neg = K.relu(-x)
        return K.concatenate([pos, neg], axis=1)

# global parameters
Project: keras-customized    Author: ambrite
def test_activation():
    # with string argument
    layer_test(core.Activation,
               kwargs={'activation': 'relu'},
               input_shape=(3, 2))

    # with function argument
    layer_test(core.Activation,
               kwargs={'activation': K.relu},
               input_shape=(3, 2))
Project: keras-mxnet-benchmarks    Author: sandeep-krishnamurthy
def call(self, x, mask=None):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        pos = K.relu(x)
        neg = K.relu(-x)
        return K.concatenate([pos, neg], axis=1)

#Result dictionary
Project: keras    Author: NVIDIA
def call(self, x, mask=None):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        pos = K.relu(x)
        neg = K.relu(-x)
        return K.concatenate([pos, neg], axis=1)

# global parameters
Project: keras    Author: NVIDIA
def test_activation():
    # with string argument
    layer_test(core.Activation,
               kwargs={'activation': 'relu'},
               input_shape=(3, 2))

    # with function argument
    layer_test(core.Activation,
               kwargs={'activation': K.relu},
               input_shape=(3, 2))
Project: 2017_iv_deep_radar    Author: tawheeler
def neg_logl(Y_true, Y_pred):
        y       =        K.flatten(        Y_true)
        mean    =        K.flatten(        Y_pred[:,:,:,0] )
        logvar  = tf.add(K.flatten( K.relu(Y_pred[:,:,:,1])), 0.001)  # ensures it is positive

        logl = -0.5*K.mean(K.log(logvar) + K.square(mean - y)/logvar, axis=-1)
        return -logl
Project: 2017_iv_deep_radar    Author: tawheeler
def neg_logl(Y_true, Y_pred):
    weights = tf.add(0.0001,tf.square(Y_pred[:,:,:,2*args.nrGaussians:3*args.nrGaussians])) # 4:6
    weights = tf.div(weights, tf.expand_dims(tf.reduce_sum(weights, reduction_indices=3), 3))
    y       =        K.flatten(        tf.tile(Y_true,[1,1,1,args.nrGaussians]))
    mean    =        K.flatten(        Y_pred[:,:,:,0*args.nrGaussians:1*args.nrGaussians] )
    var     = tf.add(K.flatten( K.relu(Y_pred[:,:,:,1*args.nrGaussians:2*args.nrGaussians])), 0.001)  # ensures it is positive
    W       =        K.flatten(       weights[:,:,:,:])
    logl    = -0.5*K.mean(W * (K.log(var) + K.square(mean - y)/var), axis=-1)

    return -logl
Project: deeplearning_keras    Author: gazzola
def leaky_relu(x):
    return K.relu(x, 0.2)
Project: KerasRL    Author: aejax
def __init__(self, S=None, A=None, model=None, loss='mse', optimizer='adam', bounds=False, batch_size=32, **kwargs):
        self.S = S
        self.A = A

        if model is None:
            assert self.S is not None and self.A is not None, 'You must either specify a model or a state and action space.'
            self.s_dim = get_space_dim(self.S)
            self.a_dim = get_space_dim(self.A)

            self.model = Sequential()
            self.model.add(Dense(100, activation='tanh', input_dim=self.s_dim))
            self.model.add(Dense(self.a_dim))
        else:
            self.model = model

        if bounds:
            out_shape = self.model.outputs[0]._keras_shape
            Umin = Input(shape=(out_shape[-1],), name='Umin')
            Lmax = Input(shape=(out_shape[-1],), name='Lmax')
            output = merge(self.model.outputs+[Umin,Lmax], mode=lambda l: l[0] + 1e-6*l[1] + 1e-6*l[2], output_shape=(out_shape[-1],))
            self.model = Model(input=self.model.inputs+[Umin,Lmax], output=output)

            def bounded_loss(y_true, y_pred):
                penalty = 4
                mse = K.square(y_pred - y_true)
                lb = penalty*K.relu(K.square(Lmax - y_pred))
                ub = penalty*K.relu(K.square(y_pred - Umin))
                return K.sum(mse + lb + ub, axis=-1)
            loss = bounded_loss

        self.model.compile(loss=loss, optimizer=optimizer)
        self.loss = loss

        super(KerasQ, self).__init__(**kwargs)
Project: InnerOuterRNN    Author: Chemoinformatics
def neural_fingerprint_layer(inputs, atom_features_of_previous_layer, num_atom_features, 
                             conv_width, fp_length, L2_reg, num_bond_features , 
                             batch_normalization = False, layer_index=0):
    '''
    one layer of the "convolutional" neural-fingerprint network

    This implementation uses indexing to select the features of neighboring atoms, and binary matrices to map atoms in the batch to the individual molecules in the batch.
    '''
#    atom_features_of_previous_layer has shape: (variable_a, num_input_atom_features) [if first layer] or (variable_a, conv_width)

    activations_by_degree = []


    for degree in config.ATOM_DEGREES:

        atom_features_of_previous_layer_this_degree = layers.Lambda(lambda x: backend.dot(inputs['atom_features_selector_matrix_degree_'+str(degree)], x))(atom_features_of_previous_layer) # layers.Lambda(lambda x: backend.dot(inputs['atom_features_selector_matrix_degree_'+str(degree)], x))(atom_features_of_previous_layer)


        merged_atom_bond_features = layers.merge([atom_features_of_previous_layer_this_degree, inputs['bond_features_degree_'+str(degree)]], mode='concat', concat_axis=1)

        activations = layers.Dense(conv_width, activation='relu', bias=False, name='activations_{}_degree_{}'.format(layer_index, degree))(merged_atom_bond_features)

        activations_by_degree.append(activations)

    # skip-connection to output/final fingerprint
    output_to_fingerprint_tmp = layers.Dense(fp_length, activation='softmax', name = 'fingerprint_skip_connection_{}'.format(layer_index))(atom_features_of_previous_layer) # (variable_a, fp_length)
    #(variable_a, fp_length)
    output_to_fingerprint     = layers.Lambda(lambda x: backend.dot(inputs['atom_batch_matching_matrix_degree_'+str(degree)], x))(output_to_fingerprint_tmp)  # layers.Lambda(lambda x: backend.dot(inputs['atom_batch_matching_matrix_degree_'+str(degree)], x))(output_to_fingerprint_tmp) # (batch_size, fp_length)

    # connect to next layer
    this_activations_tmp = layers.Dense(conv_width, activation='relu', name='layer_{}_activations'.format(layer_index))(atom_features_of_previous_layer) # (variable_a, conv_width)
    # (variable_a, conv_width)
    merged_neighbor_activations = layers.merge(activations_by_degree, mode='concat',concat_axis=0)

    new_atom_features = layers.Lambda(lambda x:merged_neighbor_activations + x)(this_activations_tmp ) #(variable_a, conv_width)
    if batch_normalization:
        new_atom_features = layers.normalization.BatchNormalization()(new_atom_features)

    #new_atom_features = layers.Lambda(backend.relu)(new_atom_features) #(variable_a, conv_width)

    return new_atom_features, output_to_fingerprint
Project: yolov2    Author: datlife
def relu6(x):
    return K.relu(x, max_value=6)
Project: kfs    Author: the-moliver
def __init__(self, filters_simple,
                 filters_complex,
                 filters_temporal,
                 spatial_kernel_size,
                 temporal_frequencies,
                 spatial_kernel_initializer='glorot_uniform',
                 temporal_kernel_initializer='glorot_uniform',
                 temporal_frequencies_initializer=step_init,
                 temporal_frequencies_initial_max=2,
                 temporal_frequencies_scaling=10,
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 spatial_kernel_regularizer=None,
                 temporal_kernel_regularizer=None,
                 temporal_frequencies_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 spatial_kernel_constraint=None,
                 temporal_kernel_constraint=None,
                 temporal_frequencies_constraint=None,
                 bias_constraint=None,
                 use_bias=True, **kwargs):

        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.filters_temporal = filters_temporal
        self.spatial_kernel_size = spatial_kernel_size
        self.temporal_frequencies = temporal_frequencies
        self.temporal_frequencies_initial_max = np.float32(temporal_frequencies_initial_max)
        self.temporal_frequencies_scaling = np.float32(temporal_frequencies_scaling)
        self.spatial_kernel_initializer = initializers.get(spatial_kernel_initializer)
        self.temporal_kernel_initializer = initializers.get(temporal_kernel_initializer)
        self.temporal_frequencies_initializer = initializers.get(temporal_frequencies_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_first, channels_last}'
        self.data_format = data_format

        self.spatial_kernel_regularizer = regularizers.get(spatial_kernel_regularizer)
        self.temporal_kernel_regularizer = regularizers.get(temporal_kernel_regularizer)
        self.temporal_frequencies_regularizer = regularizers.get(temporal_frequencies_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.spatial_kernel_constraint = constraints.UnitNormOrthogonal(self.filters_complex + self.filters_simple)
        self.temporal_kernel_constraint = constraints.get(temporal_kernel_constraint)
        self.temporal_frequencies_constraint = constraints.get(temporal_frequencies_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=5)]

        super(Convolution2DEnergy_TemporalBasis2, self).__init__(**kwargs)
Project: kfs    Author: the-moliver
def __init__(self, filters_simple,
                 filters_complex,
                 filters_temporal,
                 spatial_kernel_size,
                 temporal_frequencies,
                 spatial_kernel_initializer='glorot_uniform',
                 temporal_kernel_initializer='glorot_uniform',
                 temporal_frequencies_initializer=step_init,
                 temporal_frequencies_initial_max=2,
                 temporal_frequencies_scaling=10,
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 spatial_kernel_regularizer=None,
                 temporal_kernel_regularizer=None,
                 temporal_frequencies_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 spatial_kernel_constraint=None,
                 temporal_kernel_constraint=None,
                 temporal_frequencies_constraint=None,
                 bias_constraint=None,
                 use_bias=True, **kwargs):

        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.filters_temporal = filters_temporal
        self.spatial_kernel_size = spatial_kernel_size
        self.temporal_frequencies = temporal_frequencies
        self.temporal_frequencies_initial_max = np.float32(temporal_frequencies_initial_max)
        self.temporal_frequencies_scaling = np.float32(temporal_frequencies_scaling)
        self.spatial_kernel_initializer = initializers.get(spatial_kernel_initializer)
        self.temporal_kernel_initializer = initializers.get(temporal_kernel_initializer)
        self.temporal_frequencies_initializer = initializers.get(temporal_frequencies_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_first, channels_last}'
        self.data_format = data_format

        self.spatial_kernel_regularizer = regularizers.get(spatial_kernel_regularizer)
        self.temporal_kernel_regularizer = regularizers.get(temporal_kernel_regularizer)
        self.temporal_frequencies_regularizer = regularizers.get(temporal_frequencies_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.spatial_kernel_constraint = constraints.UnitNormOrthogonal(self.filters_complex + self.filters_simple)
        self.temporal_kernel_constraint = constraints.get(temporal_kernel_constraint)
        self.temporal_frequencies_constraint = constraints.get(temporal_frequencies_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=5)]

        super(Convolution2DEnergy_TemporalBasis3, self).__init__(**kwargs)
Project: kfs    Author: the-moliver
def __init__(self, filters_simple,
                 filters_complex,
                 kernel_size,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 use_bias=True,
                 **kwargs):

        if padding not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2DEnergy_Scatter:', padding)
        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.kernel_size = kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.data_format = data_format

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.UnitNormOrthogonal(filters_complex, singles=True)
        self.bias_constraint = constraints.get(bias_constraint)

        self.epsilon = K.constant(K.epsilon())

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=4)]
        super(Convolution2DEnergy_Scatter, self).__init__(**kwargs)
Project: kfs    Author: the-moliver
def __init__(self, filters_simple,
                 filters_complex,
                 kernel_size,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 use_bias=True,
                 **kwargs):

        if padding not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2DEnergy_Scatter:', padding)
        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.kernel_size = kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.data_format = data_format

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.UnitNormOrthogonal(filters_complex, singles=True)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=4)]
        super(Convolution2DEnergy_Scatter2, self).__init__(**kwargs)
Project: kfs    Author: the-moliver
def __init__(self,
                 kernel_size,
                 filters_mult=1,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 use_bias=True,
                 **kwargs):

        if padding not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2DEnergy_Separable:', padding)
        self.filters_mult = filters_mult
        self.kernel_size = kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.data_format = data_format

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.UnitNormOrthogonal(0, singles=True, interleave=True)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=4)]
        super(Convolution2DEnergy_Separable, self).__init__(**kwargs)
Project: sdp    Author: tansey
def __init__(self, input_layer, input_layer_size, num_classes, one_hot_dims=False,
                    scope=None, dense=None, **kwargs):
        if not hasattr(num_classes, "__len__"):
            num_classes = (num_classes, )

        self._num_classes = num_classes
        self._one_hot_dims = one_hot_dims
        self._dim_models = []
        train_losses = []
        test_losses = []
        self._labels = tf.placeholder(tf.int32, [None, len(num_classes)])
        for dim, dimsize in enumerate(num_classes):
            dim_layer = input_layer
            dim_layer_size = input_layer_size
            if dim > 0:
                if self._one_hot_dims:
                    # Use a one-hot encoding for the previous dims
                    prev_dims_layer = tf.concat([tf.one_hot(self._labels[:,i], c) for i, c in enumerate(self._num_classes[:dim])], axis=1)
                    prev_dims_layer_size = np.sum(self._num_classes[:dim])
                else:
                    # Use a real-valued scalar [-1,1] encoding for the previous dims
                    prev_dims_layer = tf.to_float(self._labels[:,:dim]) / np.array(num_classes, dtype=float)[:dim][np.newaxis, :] * 2 - 1
                    prev_dims_layer_size = dim

                if dense is not None:
                    for d in dense:
                        prev_dims_layer = Dense(d, W_regularizer=l2(0.01), activation=K.relu)(prev_dims_layer)
                        prev_dims_layer = Dropout(0.5)(prev_dims_layer)
                        prev_dims_layer_size = d
                        print 'Adding dense', d, prev_dims_layer
                dim_layer = tf.concat([dim_layer, prev_dims_layer], axis=1)
                dim_layer_size += prev_dims_layer_size
            print 'Dim layer: ', dim_layer
            dim_model = LocallySmoothedMultiscaleLayer(dim_layer, dim_layer_size, dimsize, scope=scope, **kwargs)
            train_losses.append(dim_model.train_loss)
            test_losses.append(dim_model.test_loss)
            self._dim_models.append(dim_model)

        self._train_loss = tf.reduce_sum(train_losses)
        self._test_loss = tf.reduce_sum(test_losses)

        self._density = None
Project: InnerOuterRNN    Author: Chemoinformatics
def build_fingerprint_regression_model(fp_length = 50, fp_depth = 4, conv_width = 20, 
                                             predictor_MLP_layers = [200, 200, 200], 
                                             L2_reg = 4e-4, num_input_atom_features = 62, 
                                             num_bond_features = 6, batch_normalization = False):
    """
    fp_length   # Usually neural fps need far fewer dimensions than morgan.
    fp_depth     # The depth of the network equals the fingerprint radius.
    conv_width   # Only the neural fps need this parameter.
    h1_size     # Size of hidden layer of network on top of fps.

    """

    inputs = {}

    inputs['input_atom_features'] = layers.Input(name='input_atom_features', shape=(num_input_atom_features,))
    for degree in config.ATOM_DEGREES:
        inputs['bond_features_degree_'+str(degree)] = layers.Input(name='bond_features_degree_'+str(degree), 
                                                            shape=(num_bond_features,))
        inputs['atom_features_selector_matrix_degree_'+str(degree)] = layers.Input(name='atom_features_selector_matrix_degree_'+str(degree), shape=(None,)) #todo shape

        inputs['atom_batch_matching_matrix_degree_'+str(degree)] = layers.Input(name='atom_batch_matching_matrix_degree_'+str(degree), shape=(None,)) # shape is (batch_size, variable_a)


    if 1:
        atom_features = inputs['input_atom_features']

        all_outputs_to_fingerprint = []

        num_atom_features = num_input_atom_features
        for i in range(fp_depth):
            atom_features, output_to_fingerprint = neural_fingerprint_layer(inputs, atom_features_of_previous_layer = atom_features, 
                                                                            num_atom_features = num_atom_features, conv_width = conv_width, 
                                                                            fp_length = fp_length, L2_reg = L2_reg, 
                                                                            num_bond_features = num_bond_features, 
                                                                            batch_normalization = batch_normalization,
                                                                            layer_index = i)
            num_atom_features = conv_width
            all_outputs_to_fingerprint.append(output_to_fingerprint)

        # This is the actual fingerprint, we will feed it into an MLP for prediction  -- shape is (batch_size, fp_length)
        neural_fingerprint = layers.merge(all_outputs_to_fingerprint, mode='sum') if len(all_outputs_to_fingerprint) > 1 else all_outputs_to_fingerprint[0]


    Prediction_MLP_layer = neural_fingerprint

    for i, hidden in enumerate(predictor_MLP_layers):

        Prediction_MLP_layer = layers.Dense(hidden, activation='relu', W_regularizer=regularizers.l2(L2_reg), name='MLP_hidden_'+str(i))(Prediction_MLP_layer)



    main_prediction = layers.Dense(1, activation='linear', name='main_prediction')(Prediction_MLP_layer)

    model = models.Model(input=inputs.values(), output=[main_prediction])
    model.compile(optimizer=optimizers.Adam(), loss={'main_prediction':'mse'})
    return model
Project: stock-predict-by-RNN-LSTM    Author: blockchain99
def __prepare_model(self):
        print('Build model...')
        model = Sequential()
        model.add(TimeDistributedDense(output_dim=self.hidden_cnt,
                                       input_dim=self.input_dim,
                                       input_length=self.input_length,
                                       activation='sigmoid'))
#         model.add(TimeDistributed(Dense(output_dim=self.hidden_cnt,
#                                         input_dim=self.input_dim,
#                                         input_length=self.input_length,
#                                         activation='sigmoid')))
# my modification, since `from keras.layers.core import TimeDistributedMerge` raises an ImportError
#         model.add(TimeDistributedMerge(mode='ave'))   # commented out by me

##################### my ref #########################################################
# # add a layer that returns the concatenation
# # of the positive part of the input and
# # the opposite of the negative part
# 
# def antirectifier(x):
#     x -= K.mean(x, axis=1, keepdims=True)
#     x = K.l2_normalize(x, axis=1)
#     pos = K.relu(x)
#     neg = K.relu(-x)
#     return K.concatenate([pos, neg], axis=1)
# 
# def antirectifier_output_shape(input_shape):
#     shape = list(input_shape)
#     assert len(shape) == 2  # only valid for 2D tensors
#     shape[-1] *= 2
#     return tuple(shape)
# 
# model.add(Lambda(antirectifier, output_shape=antirectifier_output_shape))
#############################################################################

        model.add(Lambda(function=lambda x: K.mean(x, axis=1), 
                   output_shape=lambda shape: (shape[0],) + shape[2:]))
#         model.add(Dropout(0.5))
        model.add(Dropout(0.93755))
        model.add(Dense(self.hidden_cnt, activation='tanh'))
        model.add(Dense(self.output_dim, activation='softmax'))

        # try using different optimizers and different optimizer configs
        print('Compile model...')
#         sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#         model.compile(loss='categorical_crossentropy', optimizer=sgd)
#         return model
##my add
        adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adagrad)
        return model