Python keras.backend module: sigmoid() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use keras.backend.sigmoid().
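
Before the project examples, a minimal standalone sketch of the function itself: `K.sigmoid` applies the element-wise logistic function 1 / (1 + exp(-x)) to a backend tensor (the array values below are illustrative).

import numpy as np
from keras import backend as K

x = K.constant(np.array([-2.0, 0.0, 2.0]))
y = K.sigmoid(x)
print(K.eval(y))  # approximately [0.12, 0.5, 0.88]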

Project: nn_playground    Author: DingKe
def call(self, x):
        # input shape: (nb_samples, time (padded with zeros), input_dim)
        # note that the .build() method of subclasses MUST define
        # self.input_spec with a complete input shape.
        input_shape = self.input_spec[0].shape

        if self.window_size > 1:
            x = K.temporal_padding(x, (self.window_size-1, 0))
        x = K.expand_dims(x, 2)  # add a dummy dimension

        # z, g
        output = K.conv2d(x, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')
        z = output[:, :, :self.output_dim]
        g = output[:, :, self.output_dim:]

        return self.activation(z) * K.sigmoid(g)
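
The pattern above, `self.activation(z) * K.sigmoid(g)`, is the gated linear unit from gated convolutional networks: half of the convolution output is the candidate, the other half a sigmoid gate. A minimal sketch of the same gating on plain tensors (shapes and values are illustrative):

import numpy as np
from keras import backend as K

out = K.constant(np.random.randn(2, 5, 8))  # (batch, time, 2 * output_dim)
z, g = out[:, :, :4], out[:, :, 4:]         # candidate and gate halves
gated = K.tanh(z) * K.sigmoid(g)            # gated linear unit
print(K.int_shape(gated))                   # (2, 5, 4)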
Project: nn_playground    Author: DingKe
def call(self, inputs):
        if self.data_format == 'channels_first':
            sq = K.mean(inputs, [2, 3])
        else:
            sq = K.mean(inputs, [1, 2])

        ex = K.dot(sq, self.kernel1)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias1)
        ex = K.relu(ex)

        ex = K.dot(ex, self.kernel2)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias2)
        ex = K.sigmoid(ex)

        if self.data_format == 'channels_first':
            ex = K.expand_dims(ex, -1)
            ex = K.expand_dims(ex, -1)
        else:
            ex = K.expand_dims(ex, 1)
            ex = K.expand_dims(ex, 1)

        return inputs * ex
Project: cloudml-samples    Author: GoogleCloudPlatform
def model_fn(input_dim,
             labels_dim,
             hidden_units=[100, 70, 50, 20],
             learning_rate=0.1):
  """Create a Keras Sequential model with layers."""
  model = models.Sequential()

  for units in hidden_units:
    model.add(layers.Dense(units=units,
                           input_dim=input_dim,
                           activation=relu))
    input_dim = units

  # Add a dense final layer with sigmoid function
  model.add(layers.Dense(labels_dim, activation=sigmoid))
  compile_model(model, learning_rate)
  return model
Project: CCIR    Author: xiaogang00
def cnn(height, width):
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.25)(F1_Q)
    predictQ = Dense(32, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(Drop1_Q)
    prediction2 = Dropout(0.25)(predictQ)
    predictions = Dense(1, activation='relu')(prediction2)
    model = Model(inputs=[question_input],
                  outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
Project: ppap    Author: unique-horn
def setup_output(self):
        """
        Setup output tensor

        """

        coordinates = get_coordinates(self.output_shape,
                                      input_channels=self.input_channels,
                                      num_filters=self.num_filters)

        num_parameters = np.prod(self.output_shape) * self.num_filters * \
                         self.input_channels
        print(num_parameters)
        # self.z_r = K.repeat_elements(self.z, rep=num_parameters, axis=0)
        self.z_r = self.init((num_parameters, 4))
        # coordinates = K.concatenate([self.z_r, coordinates], axis=1)

        output = K.tanh(K.dot(self.z_r, self.weights[0]) + self.biases[0])

        for i in range(1, len(self.weights) - 1):
            output = K.tanh(K.dot(output, self.weights[i]) + self.biases[i])
        output = K.sigmoid(K.dot(output, self.weights[-1]) + self.biases[-1])

        self.output = K.reshape(output, (self.num_filters, self.input_channels,
                                         *self.output_shape))
Project: eva    Author: israelg99
def __call__(self, model):
        if self.crop_right:
            model = Lambda(lambda x: x[:, :, :K.int_shape(x)[2]-1, :])(model)

        if self.v is not None:
            model = Merge(mode='sum')([model, self.v])

        if self.h is not None:
            hV = Dense(output_dim=2*self.filters)(self.h)
            hV = Reshape((1, 1, 2*self.filters))(hV)
            model = Lambda(lambda x: x[0]+x[1])([model,hV])

        model_f = Lambda(lambda x: x[:,:,:,:self.filters])(model)
        model_g = Lambda(lambda x: x[:,:,:,self.filters:])(model)

        model_f = Lambda(lambda x: K.tanh(x))(model_f)
        model_g = Lambda(lambda x: K.sigmoid(x))(model_g)

        res = Merge(mode='mul')([model_f, model_g])
        return res
Project: leaf-classification    Author: MWransky
def call(self, x, mask=None):
        # compute the candidate hidden state
        transform = K.conv2d(x, self.W, strides=self.subsample,
                          border_mode=self.border_mode,
                          dim_ordering=self.dim_ordering,
                          filter_shape=self.W_shape)
        if self.bias:
            transform += K.reshape(self.b, (1, 1, 1, self.nb_filter))
        transform = self.activation(transform)

        transform_gate = K.conv2d(x, self.W_gate, strides=self.subsample,
                          border_mode=self.border_mode,
                          dim_ordering=self.dim_ordering,
                          filter_shape=self.W_shape)
        if self.bias:
            transform_gate += K.reshape(self.b_gate, (1, 1, 1, self.nb_filter))
        transform_gate = K.sigmoid(transform_gate)
        carry_gate = 1.0 - transform_gate

        return transform * transform_gate + x * carry_gate

    # Define get_config method so load_from_json can run
Project: Keras-GAN    Author: Shaofanl
def register(self, info_tensor, param_tensor):
        self.info_tensor = info_tensor #(128,1)

        if self.stddev_fix:
            self.param_tensor = param_tensor

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
            std  = 1.0
        else:
            self.param_tensor = param_tensor # 2 

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
          # std  = K.maximum( param_tensor[:, 1].dimshuffle(0, 'x'), 0)
            std  = K.sigmoid( param_tensor[:, 1].dimshuffle(0, 'x') )

        e = (info_tensor-mean)/(std + K.epsilon())
        self.log_Q_c_given_x = \
            K.sum(-0.5*np.log(2*np.pi) -K.log(std+K.epsilon()) -0.5*(e**2), axis=1) * self.lmbd

#       m = Sequential([ Activation('softmax', input_shape=(self.n,)), Lambda(lambda x: K.log(x), lambda x: x) ])
        return K.reshape(self.log_Q_c_given_x, (-1, 1))
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM
def sample_h_given_x(self, x):
        """
        Draw sample from p(h|x).

        For Bernoulli RBM the conditional probability distribution can be derived to be
           p(h_j=1|x) = sigmoid(x^T W[:,j] + bh_j).
        """
        h_pre = K.dot(x, self.W) + self.bh          # pre-sigmoid (used in cross-entropy error calculation for better numerical stability)
        #h_sigm = K.sigmoid(h_pre)                  # mean of Bernoulli distribution ('p', prob. of variable taking value 1), sometimes called mean-field value
        h_sigm = self.activation(self.scaling_h_given_x * h_pre)

        # drop out noise
        if 0.0 < self.p < 1.0:
            noise_shape = self._get_noise_shape(h_sigm)
            h_sigm = K.in_train_phase(K.dropout(h_sigm, self.p, noise_shape), h_sigm)

        h_samp = random_binomial(shape=h_sigm.shape, n=1, p=h_sigm)
        # random sample
        #   \hat{h} = 1,      if p(h=1|x) > uniform(0, 1)
        #             0,      otherwise

        return h_samp, h_pre, h_sigm
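
The `random_binomial` helper above comes from the project itself; with stock backend ops, the same Bernoulli draw can be sketched by thresholding uniform noise (an illustrative sketch, not the project's implementation):

from keras import backend as K

def bernoulli_sample(p):
    # 1 where uniform noise < p(h=1|x), else 0
    noise = K.random_uniform(K.shape(p))
    return K.cast(K.less(noise, p), K.floatx())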
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM
def sample_x_given_h(self, h):
        """
        Draw sample from p(x|h).

        For Bernoulli RBM the conditional probability distribution can be derived to be
           p(x_i=1|h) = sigmoid(W[i,:] h + bx_i).
        """
        # pre-sigmoid (used in cross-entropy error calculation for better numerical stability)
        x_pre = K.dot(h, self.W.T) + self.bx

        # mean of Bernoulli distribution ('p', prob. of variable taking value 1), sometimes called mean-field value
        x_sigm = K.sigmoid(self.scaling_x_given_h  * x_pre)
        #x_sigm = self.activation(self.scaling_x_given_h * x_pre)

        x_samp = random_binomial(shape=x_sigm.shape, n=1, p=x_sigm)
        # random sample
        #   \hat{x} = 1,      if p(x=1|h) > uniform(0, 1)
        #             0,      otherwise

        # pre and sigm are returned to compute cross-entropy
        return x_samp, x_pre, x_sigm
Project: VGG    Author: jackfan00
def yoloconfidloss(y_true, y_pred, t):
    real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
    pobj = K.sigmoid(y_pred)
    lo = K.square(real_y_true-pobj)
    value_if_true = lamda_confid_obj*(lo)
    value_if_false = lamda_confid_noobj*(lo)
    loss1 = tf.select(t, value_if_true, value_if_false)

    loss = K.mean(loss1) 
    #
    noobj = tf.select(t, K.zeros_like(y_pred), pobj)
    noobjcount = tf.select(t, K.zeros_like(y_pred), K.ones_like(y_pred))
    ave_anyobj = K.sum(noobj) / K.sum(noobjcount)
    #ave_anyobj = K.mean(pobj)
    obj = tf.select(t, pobj, K.zeros_like(y_pred))
    objcount = tf.select(t, K.ones_like(y_pred), K.zeros_like(y_pred))
    #ave_obj = K.mean( K.sum(obj, axis=1) / (K.sum(objcount, axis=1)+0.000001) ) # prevent div 0
    ave_obj =  K.sum(obj) / (K.sum(objcount)+0.000001)  # prevent div 0
    return loss, ave_anyobj, ave_obj
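
`tf.select` dates from pre-1.0 TensorFlow; it was renamed to `tf.where`, which takes the same three arguments (condition, value_if_true, value_if_false). A small compatibility shim for running these losses on newer TensorFlow:

import tensorflow as tf

# tf.select(cond, x, y) became tf.where(cond, x, y) in TensorFlow 1.0
select = tf.select if hasattr(tf, 'select') else tf.where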

# shape is (gridcells*2,)
Project: R-NET-in-Keras    Author: YerevaNN
def step(self, inputs, states):
        vP_t = inputs
        hP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks 
        vP, WP_v, WPP_v, v, W_g2 = states[3:8]
        vP_mask, = states[8:]

        WP_v_Dot = K.dot(vP, WP_v)
        WPP_v_Dot = K.dot(K.expand_dims(vP_t, axis=1), WPP_v)

        s_t_hat = K.tanh(WPP_v_Dot + WP_v_Dot)
        s_t = K.dot(s_t_hat, v)
        s_t = K.batch_flatten(s_t)

        a_t = softmax(s_t, mask=vP_mask, axis=1)

        c_t = K.batch_dot(a_t, vP, axes=[1, 1])

        GRU_inputs = K.concatenate([vP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g2))
        GRU_inputs = g * GRU_inputs

        hP_t, s = super(SelfAttnGRU, self).step(GRU_inputs, states)

        return hP_t, s
Project: R-NET-in-Keras    Author: YerevaNN
def step(self, inputs, states):
        uP_t = inputs
        vP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks
        uQ, WQ_u, WP_v, WP_u, v, W_g1 = states[3:9]
        uQ_mask, = states[9:10]

        WQ_u_Dot = K.dot(uQ, WQ_u) #WQ_u
        WP_v_Dot = K.dot(K.expand_dims(vP_tm1, axis=1), WP_v) #WP_v
        WP_u_Dot = K.dot(K.expand_dims(uP_t, axis=1), WP_u) # WP_u

        s_t_hat = K.tanh(WQ_u_Dot + WP_v_Dot + WP_u_Dot)

        s_t = K.dot(s_t_hat, v) # v
        s_t = K.batch_flatten(s_t)
        a_t = softmax(s_t, mask=uQ_mask, axis=1)
        c_t = K.batch_dot(a_t, uQ, axes=[1, 1])

        GRU_inputs = K.concatenate([uP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g1))  # W_g1
        GRU_inputs = g * GRU_inputs
        vP_t, s = super(QuestionAttnGRU, self).step(GRU_inputs, states)

        return vP_t, s
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def step(self, x, states):
        h, [h, c] = super(AttentionLSTM, self).step(x, states)
        attention = states[4]

        m = self.attn_activation(K.dot(h, self.U_a) * attention + self.b_a)
        # Intuitively it makes more sense to use a sigmoid (was getting some NaN problems
        # which I think might have been caused by the exponential function -> gradients blow up)
        s = K.sigmoid(K.dot(m, self.U_s) + self.b_s)

        if self.single_attention_param:
            h = h * K.repeat_elements(s, self.output_dim, axis=1)
        else:
            h = h * s

        return h, [h, c]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def step(self, x, states):
        h, [h, c] = self.layer.step(x, states)
        attention = states[4]

        m = self.attn_activation(K.dot(h, self.U_a) * attention + self.b_a)
        s = K.sigmoid(K.dot(m, self.U_s) + self.b_s)

        if self.single_attention_param:
            h = h * K.repeat_elements(s, self.layer.output_dim, axis=1)
        else:
            h = h * s

        return h, [h, c]
Project: nn_playground    Author: DingKe
def step(self, inputs, states):
        prev_output = states[0]

        z = inputs[:, :self.units]
        f = inputs[:, self.units:2 * self.units]
        o = inputs[:, 2 * self.units:]

        z = self.activation(z)
        f = f if self.dropout is not None and 0. < self.dropout < 1. else K.sigmoid(f)
        o = K.sigmoid(o)

        output = f * prev_output + (1 - f) * z
        output = o * output

        return output, [output]
Project: CCIR    Author: xiaogang00
def ranking_loss(y_true, y_pred):
    pos = y_pred[:,0]
    neg = y_pred[:,1]
    loss = -K.sigmoid(pos-neg) # use loss = K.maximum(1.0 + neg - pos, 0.0) if you want to use margin ranking loss
    return K.mean(loss) + 0 * y_true
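
The `+ 0 * y_true` term only ties the otherwise-unused targets into the graph so Keras does not complain. A quick numeric check (scores are illustrative):

import numpy as np
from keras import backend as K

y_pred = K.constant(np.array([[2.0, 0.5], [0.1, 1.2]]))  # [pos, neg] score pairs
y_true = K.constant(np.zeros(2))                          # dummy targets, unused
print(K.eval(ranking_loss(y_true, y_pred)))               # mean of -sigmoid(pos - neg)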
Project: CCIR    Author: xiaogang00
def Margin_Loss(y_true, y_pred):
    score_best = y_pred[0]
    score_predict = y_pred[1]
    loss = K.maximum(0.0, 1.0 - K.sigmoid(score_best - score_predict))
    return K.mean(loss) + 0 * y_true
Project: CCIR    Author: xiaogang00
def cnn(height_a, height_q, width_a, width_q, extra_len):
    question_input = Input(shape=(height_q, width_q, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.5)(F1_Q)
    predictQ = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(Drop1_Q)


    # kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01)
    answer_input = Input(shape=(height_a, width_a, 1), name='answer_input')
    conv1_A = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(answer_input)
    Max1_A = MaxPooling2D((319, 1), strides=(1, 1), padding='valid')(conv1_A)
    F1_A = Flatten()(Max1_A)
    Drop1_A = Dropout(0.5)(F1_A)
    predictA = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(Drop1_A)

    extra_input = Input(shape=(extra_len,), name='extra_input')
    predictQ1 = concatenate([predictQ, extra_input], axis=1)
    predictA1 = concatenate([predictA, extra_input], axis=1)
    predictions = merge([predictA1, predictQ1], mode='dot')
    model = Model(inputs=[question_input, answer_input, extra_input],
                  outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
Project: ppap    Author: unique-horn
def setup_output(self):
        """
        Setup output tensor
        """

        coordinates = get_coordinates_2D(self.output_shape, scale=self.scale)

        output = K.sin(K.dot(coordinates, self.weights[0]) + self.biases[0])

        for i in range(1, len(self.weights) - 1):
            output = K.tanh(K.dot(output, self.weights[i]) + self.biases[i])
        output = K.sigmoid(K.dot(output, self.weights[-1]) + self.biases[-1])

        self.output = K.reshape(output, self.output_shape)
Project: ppap    Author: unique-horn
def get_output(self, z):
        """
        Return output using the given z
        z has shape (batch_size, z_dim)
        """

        assert len(z.shape) == 2
        assert self.z_dim == z.shape[1]

        total_values = np.prod(self.output_shape)
        batch_total = total_values * z.shape[0]

        z_rep = K.repeat_elements(K.expand_dims(z, 1), total_values, 1)

        coords_rep = K.repeat_elements(
            K.expand_dims(self.coordinates, 0), z.shape[0], 0)

        coords_rep = K.reshape(coords_rep,
                               (batch_total, self.coordinates.shape[1]))
        z_rep = K.reshape(z_rep, (batch_total, z.shape[1]))

        # Add z and coords to first layer
        output = K.sin(K.dot(coords_rep, self.weights[0]) + self.biases[0] +
                       K.dot(z_rep, self.weights[-1]))

        for i in range(1, len(self.layer_sizes)):
            output = K.tanh(K.dot(output, self.weights[i]) + self.biases[i])

        # Using -2 for weights since -1 is z vector weight
        output = K.sigmoid(K.dot(output, self.weights[-2]) + self.biases[-1])

        return K.reshape(output, (z.shape[0], *self.output_shape))
Project: gandlf    Author: codekansas
def sigmoid(a, b):
    """Sigmoid similarity. Maximum is 1 (a == b), minimum is 0."""

    return K.sigmoid(K.sum(a * b, axis=-1))
Project: gandlf    Author: codekansas
def geometric(a, b):
    """Geometric mean of sigmoid and euclidian similarity."""

    return sigmoid(a, b) * euclidean(a, b)
Project: gandlf    Author: codekansas
def arithmetic(a, b):
    """Arithmetic mean of sigmoid and euclidean similarity."""

    return (sigmoid(a, b) + euclidean(a, b)) * 0.5
Project: LINE    Author: VahidooX
def LINE_loss(y_true, y_pred):
    coeff = y_true*2 - 1
    return -K.mean(K.log(K.sigmoid(coeff*y_pred)))
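
`coeff` maps the {0, 1} labels to {-1, +1}, so this is the logistic loss -log(sigmoid(label * score)) used for edge sampling in LINE. A quick numeric check (scores are illustrative):

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([1.0, 0.0]))   # a positive and a negative edge
y_pred = K.constant(np.array([2.0, -3.0]))  # both scored correctly
print(K.eval(LINE_loss(y_true, y_pred)))    # small loss, roughly 0.09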
Project: Neural-Chatbot    Author: saurabhmathur96
def step(self, x, states):
        h, params = self.layer.step(x, states)
        attention = states[-1]

        m = self.attn_activation(K.dot(h, self.U_a) * attention + self.b_a)
        s = K.sigmoid(K.dot(m, self.U_s) + self.b_s)

        if self.single_attention_param:
            h = h * K.repeat_elements(s, self.layer.units, axis=1)
        else:
            h = h * s

        return h, params
Project: aes-gated-word-char    Author: unkn0wnxx
def call(self, inputs, mask=None):
        char = inputs[0]
        word = inputs[1]
        g = K.sigmoid(K.dot(word, self.v) + self.b)
        return (1. - g)[:, :, None] * word + g[:, :, None] * char
Project: aes-gated-word-char    Author: unkn0wnxx
def call(self, inputs, mask=None):
        char = inputs[0]
        word = inputs[1]
        g = K.sigmoid(K.dot(word, self.v) + self.b)
        return (1. - g) * word + g * char
Project: aes-gated-word-char    Author: unkn0wnxx
def call(self, inputs, mask=None):
        char = inputs[0]
        word = inputs[1]
        g = K.sigmoid(word * self.v + self.b)
        return (1. - g) * word + g * char
Project: adversarial-variational-bayes    Author: gdikov
def discriminator_loss(discrim_output_prior, discrim_output_posterior, from_logits=False):
        if from_logits:
            discrim_output_posterior = ker.sigmoid(discrim_output_posterior)
            discrim_output_prior = ker.sigmoid(discrim_output_prior)
        # The discriminator loss is the GAN loss with input from the prior and posterior distributions
        discriminator_loss = ker.mean(binary_crossentropy(y_pred=discrim_output_posterior,
                                                          y_true=ker.ones_like(discrim_output_posterior))
                                      + binary_crossentropy(y_pred=discrim_output_prior,
                                                            y_true=ker.zeros_like(discrim_output_prior)))
        return discriminator_loss
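
When the discriminator outputs raw logits, an alternative is to skip the explicit sigmoid and let the backend compute the cross-entropy from logits directly, which is numerically safer (a sketch using backend ops, not the project's code):

from keras import backend as K

def discriminator_loss_from_logits(logits_prior, logits_posterior):
    # same GAN loss, without the sigmoid -> clipped-log round trip
    loss_posterior = K.binary_crossentropy(K.ones_like(logits_posterior),
                                           logits_posterior, from_logits=True)
    loss_prior = K.binary_crossentropy(K.zeros_like(logits_prior),
                                       logits_prior, from_logits=True)
    return K.mean(loss_posterior + loss_prior)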
Project: deepcpg    Author: cangermueller
def call(self, x, mask=None):
        return K.sigmoid(x) * self.scaling
Project: deepcpg    Author: cangermueller
def add_output_layers(stem, output_names, init='glorot_uniform'):
    """Add and return outputs to a given layer.

    Adds output layer for each output in `output_names` to layer `stem`.

    Parameters
    ----------
    stem: Keras layer
        Keras layer to which output layers are added.
    output_names: list
        List of output names.

    Returns
    -------
    list
        Output layers added to `stem`.
    """
    outputs = []
    for output_name in output_names:
        _output_name = output_name.split(OUTPUT_SEP)
        if _output_name[-1] in ['entropy']:
            x = kl.Dense(1, kernel_initializer=init, activation='relu')(stem)
        elif _output_name[-1] in ['var']:
            x = kl.Dense(1, kernel_initializer=init)(stem)
            x = ScaledSigmoid(0.251, name=output_name)(x)
        elif _output_name[-1] in ['cat_var']:
            x = kl.Dense(3, kernel_initializer=init,
                         activation='softmax',
                         name=output_name)(stem)
        else:
            x = kl.Dense(1, kernel_initializer=init,
                         activation='sigmoid',
                         name=output_name)(stem)
        outputs.append(x)
    return outputs
Project: pixelcnn_keras    Author: suga93
def __call__(self, xW, layer_idx):
        '''calculate gated activation maps given input maps '''
        if self.stack_name == 'vertical':
            stack_tag = 'v'
        elif self.stack_name == 'horizontal':
            stack_tag = 'h'

        if self.crop_right:
            xW = Lambda(self._crop_right, name='h_crop_right_'+str(layer_idx))(xW)

        if self.v_map is not None:
            xW = merge([xW, self.v_map], mode='sum', name='h_merge_v_'+str(layer_idx))

        if self.h is not None:
            hV = Dense(output_dim=2*self.nb_filters, name=stack_tag+'_dense_latent_'+str(layer_idx))(self.h)
            hV = Reshape((1, 1, 2*self.nb_filters), name=stack_tag+'_reshape_latent_'+str(layer_idx))(hV)
            #xW = merge([xW, hV], mode=lambda x: x[0]+x[1])
            xW = Lambda(lambda x: x[0]+x[1], name=stack_tag+'_merge_latent_'+str(layer_idx))([xW,hV])

        xW_f = Lambda(lambda x: x[:,:,:,:self.nb_filters], name=stack_tag+'_Wf_'+str(layer_idx))(xW)
        xW_g = Lambda(lambda x: x[:,:,:,self.nb_filters:], name=stack_tag+'_Wg_'+str(layer_idx))(xW)

        xW_f = Lambda(lambda x: K.tanh(x), name=stack_tag+'_tanh_'+str(layer_idx))(xW_f)
        xW_g = Lambda(lambda x: K.sigmoid(x), name=stack_tag+'_sigmoid_'+str(layer_idx))(xW_g)

        res = merge([xW_f, xW_g], mode='mul', name=stack_tag+'_merge_gate_'+str(layer_idx))
        #print(type(res), K.int_shape(res), hasattr(res, '_keras_history'))
        return res
Project: HighwayNetwork    Author: trangptm
def call(self, x, mask=None):
        # compute the candidate hidden state
        transform = K.conv2d(x, self.W, strides=self.subsample,
                          border_mode=self.border_mode,
                          dim_ordering=self.dim_ordering,
                          filter_shape=self.W_shape)
        if self.bias:
            if self.dim_ordering == 'th':
                transform += K.reshape(self.b, (1, self.nb_filter, 1, 1))
            elif self.dim_ordering == 'tf':
                transform += K.reshape(self.b, (1, 1, 1, self.nb_filter))
            else:
                raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
        transform = self.activation(transform)

        transform_gate = K.conv2d(x, self.W_gate, strides=self.subsample,
                          border_mode=self.border_mode,
                          dim_ordering=self.dim_ordering,
                          filter_shape=self.W_shape)
        if self.bias:
            if self.dim_ordering == 'th':
                transform_gate += K.reshape(self.b_gate, (1, self.nb_filter, 1, 1))
            elif self.dim_ordering == 'tf':
                transform_gate += K.reshape(self.b_gate, (1, 1, 1, self.nb_filter))
            else:
                raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
        transform_gate = K.sigmoid(transform_gate)
        carry_gate = 1.0 - transform_gate

        return transform * transform_gate + x * carry_gate
Project: neural_style    Author: metaflow-ai
def call(self, x, mask=None):
        return self.scaling * K.sigmoid(x / self.scaling)
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM
def reconstruction_loss(self, x, dummy):
        """
        Compute binary cross-entropy between the binary input data and the reconstruction generated by the model.

        Result is a Theano expression with the form loss = f(x).

        Useful as a rough indication of training progress (see Hinton2010).
        Summed over feature dimensions, mean over samples.
        """

        def loss(x):
            _, pre, _ = self.mcmc_chain(x, self.nb_gibbs_steps)
            # NOTE:
            #   when computing log(sigmoid(x)) and log(1 - sigmoid(x)) of cross-entropy,
            #   if x is very big negative, sigmoid(x) will be 0 and log(0) will be nan or -inf
            #   if x is very big positive, sigmoid(x) will be 1 and log(1-0) will be nan or -inf
            #   Theano automatically rewrites this kind of expression using log(sigmoid(x)) = -softplus(-x), which
            #   is more stable numerically
            #   however, as the sigmoid() function used in the reconstruction is inside a scan() operation, Theano
            #   doesn't 'see' it and is not able to perform the change; as a work-around we use pre-sigmoid value
            #   generated inside the scan() and apply the sigmoid here
            #
            # NOTE:
            #   not sure how important this is; in most cases seems to work fine using just T.nnet.binary_crossentropy()
            #   for instance; keras.objectives.binary_crossentropy() simply clips the value entering the log(); and
            #   this is only used for monitoring, not calculating gradient
            cross_entropy_loss = -T.mean(T.sum(x*T.log(T.nnet.sigmoid(pre)) + (1 - x)*T.log(1 - T.nnet.sigmoid(pre)), axis=1))
            #cross_entropy_loss = -T.mean(T.sum(x*T.log(self.activation(pre)) + (1 - x)*T.log(1 - self.activation(pre)), axis=1))
            return cross_entropy_loss

        y = loss(x)
        return y
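
The identity the NOTE relies on, log(sigmoid(p)) = -softplus(-p) and log(1 - sigmoid(p)) = -softplus(p), can also be applied by hand when the automatic rewrite is out of reach; a backend-level sketch of the same cross-entropy:

from keras import backend as K

def stable_bce_from_pre_sigmoid(x, pre):
    # -mean(sum(x*log(sigmoid(pre)) + (1-x)*log(1-sigmoid(pre)), axis=1))
    return K.mean(K.sum(x * K.softplus(-pre) + (1 - x) * K.softplus(pre), axis=1))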
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM
def get_x_given_h_layer(self, as_initial_layer=False):
        """
        Generates a new Dense Layer that computes mean of Bernoulli distribution p(x|h), ie. p(x=1|h).
        """
        if as_initial_layer:
            layer = Dense(input_dim=self.hidden_dim, output_dim=self.input_dim, activation='sigmoid', weights=[self.W.get_value().T, self.bx.get_value()])
        else:
            layer = Dense(output_dim=self.input_dim, activation='sigmoid', weights=[self.W.get_value().T, self.bx.get_value()])
        return layer
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM
def sample_h_given_x(self, x):

        h_pre = K.dot(x, self.W) + self.bh
        h_sigm = K.maximum(self.scaling_h_given_x * h_pre, 0)
        #std = K.mean(K.sigmoid(self.scaling_h_given_x * h_pre))
        #eta = random_normal(shape=h_pre.shape, std=std)
        #h_samp = K.maximum(h_pre + eta, 0)
        h_samp = nrlu(h_pre)

        return h_samp, h_pre, h_sigm
Project: VGG    Author: jackfan00
def yoloxyloss(y_true, y_pred, t):
    real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
    lo = K.square(real_y_true-K.sigmoid(y_pred))
    value_if_true = lamda_xy*(lo)
    value_if_false = K.zeros_like(y_true)
    loss1 = tf.select(t, value_if_true, value_if_false)
    #return K.mean(value_if_true)
    objsum = K.sum(y_true)
    return K.sum(loss1)/(objsum+0.0000001)

# different with YOLO
# shape is (gridcells*2,)
Project: VGG    Author: jackfan00
def yolowhloss(y_true, y_pred, t):
    real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
    lo = K.square(K.sqrt(real_y_true)-K.sqrt(K.sigmoid(y_pred)))
    # let w,h not too small or large
    #lo = K.square(y_true-y_pred)+reguralar_wh*K.square(0.5-y_pred)
    value_if_true = lamda_wh*(lo)
    value_if_false = K.zeros_like(y_true)
    loss1 = tf.select(t, value_if_true, value_if_false)
    #return K.mean(loss1/(y_true+0.000000001))
    #return K.mean(value_if_true)
    objsum = K.sum(y_true)
    return K.sum(loss1)/(objsum+0.0000001)

# shape is (gridcells*classes,)
Project: VGG    Author: jackfan00
def yoloconfidloss(y_true, y_pred, t):
    pobj = K.sigmoid(y_pred)
    lo = K.square(y_true-pobj)
    value_if_true = lamda_confid_obj*(lo)
    value_if_false = lamda_confid_noobj*(lo)
    loss1 = tf.select(t, value_if_true, value_if_false)
    loss = K.mean(loss1) #,axis=0)
    #
    ave_anyobj = K.mean(pobj)
    obj = tf.select(t, pobj, K.zeros_like(y_pred))
    objcount = tf.select(t, K.ones_like(y_pred), K.zeros_like(y_pred))
    ave_obj = K.mean( K.sum(obj, axis=1) / (K.sum(objcount, axis=1)+0.000001) ) # prevent div 0
    return loss, ave_anyobj, ave_obj

# shape is (gridcells*2,)
Project: VGG    Author: jackfan00
def yoloxyloss(y_true, y_pred, t):
    lo = K.square(y_true-K.sigmoid(y_pred))
    value_if_true = lamda_xy*(lo)
    value_if_false = K.zeros_like(y_true)
    loss1 = tf.select(t, value_if_true, value_if_false)
    return K.mean(loss1)

# different with YOLO
# shape is (gridcells*2,)
Project: VGG    Author: jackfan00
def iou(x_true,y_true,w_true,h_true,x_pred,y_pred,w_pred,h_pred,t):
    xoffset = K.cast_to_floatx((np.tile(np.arange(side),side)))
    yoffset = K.cast_to_floatx((np.repeat(np.arange(side),side)))
    x = tf.select(t, K.sigmoid(x_pred), K.zeros_like(x_pred)) 
    y = tf.select(t, K.sigmoid(y_pred), K.zeros_like(y_pred))
    w = tf.select(t, K.sigmoid(w_pred), K.zeros_like(w_pred))
    h = tf.select(t, K.sigmoid(h_pred), K.zeros_like(h_pred))

    ow = overlap(x+xoffset, w*side, x_true+xoffset, w_true*side)
    oh = overlap(y+yoffset, h*side, y_true+yoffset, h_true*side)
    ow = tf.select(K.greater(ow,0), ow, K.zeros_like(ow))
    oh = tf.select(K.greater(oh,0), oh, K.zeros_like(oh))
    intersection = ow*oh
    union = w*h*(side**2) + w_true*h_true*(side**2) - intersection + K.epsilon()  # prevent div 0
    #
    recall_iou = intersection / union
    recall_t = K.greater(recall_iou, 0.5)
    recall_count = K.sum(tf.select(recall_t, K.ones_like(recall_iou), K.zeros_like(recall_iou)))
    #
    iou = K.sum(intersection / union, axis=1)
    obj_count = K.sum(tf.select(t, K.ones_like(x_true), K.zeros_like(x_true)) )
    ave_iou = K.sum(iou) / (obj_count)
    recall = recall_count / (obj_count)
    return ave_iou, recall, obj_count, intersection, union,ow,oh,x,y,w,h

# shape is (gridcells*(5+classes), )
Project: deep-learning-experiments    Author: raghakot
def call(self, layer_inputs, mask=None):
        # Resize all inputs to same size.
        resized_inputs = self._ensure_same_size(layer_inputs)

        # Compute sigmoid weighted inputs
        stacked = K.concatenate(resized_inputs, axis=-1)
        weighted = stacked * K.sigmoid(self.W)

        # Merge according to provided merge strategy.
        merged = self._merge(weighted)

        # Cache this for use in `get_output_shape_for`
        self._out_shape = K.int_shape(merged)
        return merged
Project: narrative-prediction    Author: roemmele
def create_model(self):
        model = Sequential()
        model.add(Dense(output_dim=self.n_output_classes, input_dim=self.n_input_nodes, activation='sigmoid', name='output_layer'))
        model.add(Activation('softmax'))
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model
Project: narrative-prediction    Author: roemmele
def create_model(self):

        # if self.embedded_input:
        #     seq1_layer = Input(shape=(self.n_timesteps, self.n_embedding_nodes), name="seq1_layer")
        #     seq2_layer = Input(shape=(self.n_timesteps, self.n_embedding_nodes), name="seq2_layer")
        #     mask_layer = Masking(mask_value=0.0, name='mask_layer')
        #     seq1_mask_layer = mask_layer(seq1_layer)
        #     seq2_mask_layer = mask_layer(seq2_layer)
        #     prev_seq1_layer = seq1_mask_layer
        #     prev_seq2_layer = seq2_mask_layer
        # else:
        #emb_layer = Embedding(self.lexicon_size + 1, self.n_embedding_nodes, mask_zero=True, name='emb_layer')
        seq1_layer = Input(shape=(self.lexicon_size + 1,), name="seq1_layer")
        #seq1_emb_layer = emb_layer(seq1_layer)
        seq2_layer = Input(shape=(self.lexicon_size + 1,), name="seq2_layer")
        #seq2_emb_layer = emb_layer(seq2_layer)
        prev_seq1_layer = seq1_layer
        prev_seq2_layer = seq2_layer

        for layer_idx in range(self.n_hidden_layers):
            # if layer_idx == self.n_hidden_layers - 1:
            #     return_sequences = False
            seq1_hidden_layer = Dense(output_dim=self.n_hidden_nodes, name='seq1_hidden_layer', activation='tanh')(prev_seq1_layer)
            seq2_hidden_layer = Dense(output_dim=self.n_hidden_nodes, name='seq2_hidden_layer', activation='tanh')(prev_seq2_layer)

        merge_layer = merge([seq1_hidden_layer, seq2_hidden_layer], mode='concat', concat_axis=-1, name='merge_layer')
        dense_layer = Dense(output_dim=self.n_hidden_nodes, name='dense_layer', activation='tanh')(merge_layer)
        pred_layer = Dense(output_dim=1, name='pred_layer', activation='sigmoid')(dense_layer)
        model = Model(input=[seq1_layer, seq2_layer], output=pred_layer)
        model.compile(loss='binary_crossentropy', optimizer='adam')#, metrics=['accuracy'])
        return model

    # def get_batch(self, seqs):
    #     '''takes sequences of word indices as input and returns word count vectors'''
    #     batch = []
    #     for seq in seqs:
    #         seq = numpy.bincount(numpy.array(seq), minlength=self.lexicon_size + 1)
    #         batch.append(seq)
    #     batch = numpy.array(batch)
    #     batch[:,0] = 0
    #     return batch
Project: narrative-prediction    Author: roemmele
def ranking_loss(self, y_true, y_pred):
        #import pdb;pdb.set_trace()
        pos_pred = y_pred[:,0]
        neg_pred = y_pred[:,1]
        loss = 1 - K.sigmoid(pos_pred - neg_pred) #K.maximum(1.0 + neg_pred - pos_pred, 0.0)
        return K.mean(loss)# + 0 * y_true