Python keras.backend module: log() usage examples

The following code examples, extracted from open-source Python projects, illustrate how to use keras.backend.log().
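
The snippets below assume the usual imports for Keras of this era; a typical preamble (inferred from the code on this page, not taken from any single project) would be:

# Common imports assumed by the snippets below (inferred, not verbatim from any project)
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense, Lambda, Flatten, Convolution2D
from keras.models import Model

(The Asynchronous-RL-agent snippets additionally appear to alias theano.tensor as Theano, and the pCVR logloss snippet uses scipy as sp.)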

Project: segmentation_DLMI    Author: imatge-upc
def categorical_crossentropy_3d(y_true, y_predicted):
    """
    Computes the categorical cross-entropy loss for a softmax distribution over a one-hot-encoded 3D array
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true : keras.placeholder [batches, dim0,dim1,dim2]
        Placeholder for data holding the ground-truth labels encoded in a one-hot representation
    y_predicted : keras.placeholder [batches,channels,dim0,dim1,dim2]
        Placeholder for data holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(y_true_flatten)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
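Since categorical_crossentropy_3d has the standard (y_true, y_pred) signature, it can be passed straight to compile; a minimal usage sketch (the model name is hypothetical):

# Hypothetical usage: plug the loss into a compiled Keras model
model.compile(optimizer='adam', loss=categorical_crossentropy_3d)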
Project: nli_generation    Author: jstarc
def adverse_model(discriminator):

    train_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    def margin_opt(inputs):
        assert len(inputs) == 2, ('Margin Output needs '
                              '2 inputs, %d given' % len(inputs))
        return K.log(inputs[0]) + K.log(1-inputs[1])

    margin = Lambda(margin_opt, output_shape=(lambda s : (None, 1)))\
               ([discriminator(train_input), discriminator(hypo_input)])
    adverserial = Model([train_input, hypo_input], margin)

    adverserial.compile(loss=minimize, optimizer='adam')
    return adverserial
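The minimize loss passed to compile is defined elsewhere in the nli_generation project. Because the Lambda output is already the objective log D(x) + log(1 - D(G(z))), a plausible stand-in (an assumption, not the project's actual definition, and the sign depends on which player is being trained) would simply reduce the model output to a scalar:

def minimize(y_true, y_pred):
    # Hypothetical stand-in: the Lambda output is already the quantity to optimize,
    # so the loss just averages it (negated here to maximize the margin).
    return -K.mean(y_pred)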
Project: Asynchronous-RL-agent    Author: Fritz449
def create_conv_model(self):
        # This is where the neural network model is initialized
        init = 'glorot_uniform'
        self.state_in = Input(self.state_dim)
        self.l1 = Convolution2D(32, 8, 8, activation='elu', init=init, subsample=(4, 4), border_mode='same')(
            self.state_in)
        self.l2 = Convolution2D(64, 4, 4, activation='elu', init=init, subsample=(2, 2), border_mode='same')(
            self.l1)
        # self.l3 = Convolution2D(64, 3, 3, activation='relu', init=init, subsample=(1, 1), border_mode='same')(
        #     self.l2)
        self.l3 = self.l2
        self.h = Flatten()(self.l3)
        self.hidden = Dense(256, init=init, activation='elu')(self.h)
        self.value = Dense(1, init=init)(self.hidden)
        self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)
        self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                             Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                    axis=[1], keepdims=True), (1, self.action_dim)))
        self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
        self.model = Model(self.state_in, output=[self.policy, self.value])
Project: Asynchronous-RL-agent    Author: Fritz449
def create_fc_model(self):
        # This is where the neural network model is initialized
        init = 'glorot_uniform'
        self.state_in = Input(self.state_dim)
        self.hidden = Dense(256, init=init, activation='elu')(self.state_in)
        self.value = Dense(1)(self.hidden)
        self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)

        self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                             Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                    axis=[1], keepdims=True), (1, self.action_dim)))
        # print (type(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
        #                                                 axis=[1], keepdims=True)))
        # print(Theano.function([self.state_in], [Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
        #                                                 axis=[1], keepdims=True)])([np.zeros((32,) + self.state_dim)])[0].shape)
        # 1/0
        self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
        self.model = Model(self.state_in, output=[self.policy, self.value])
Project: rl    Author: Shmuma
def make_model(state_shape, n_actions):
    in_t = Input(shape=(HISTORY_STEPS,) + state_shape, name='input')
    action_t = Input(shape=(1,), dtype='int32', name='action')
    advantage_t = Input(shape=(1,), name='advantage')

    fl_t = Flatten(name='flat')(in_t)
    l1_t = Dense(SIMPLE_L1_SIZE, activation='relu', name='l1')(fl_t)
    l2_t = Dense(SIMPLE_L2_SIZE, activation='relu', name='l2')(l1_t)
    policy_t = Dense(n_actions, name='policy', activation='softmax')(l2_t)

    def loss_func(args):
        p_t, act_t, adv_t = args
        oh_t = K.one_hot(act_t, n_actions)
        oh_t = K.squeeze(oh_t, 1)
        p_oh_t = K.log(1e-6 + K.sum(oh_t * p_t, axis=-1, keepdims=True))
        res_t = adv_t * p_oh_t
        return -res_t

    loss_t = Lambda(loss_func, output_shape=(1,), name='loss')([policy_t, action_t, advantage_t])

    return Model(input=[in_t, action_t, advantage_t], output=[policy_t, loss_t])
Project: Parametric-t-SNE-in-Keras    Author: zaburo-ch
def x2p(X):
    tol = 1e-5
    n = X.shape[0]
    logU = np.log(perplexity)

    sum_X = np.sum(np.square(X), axis=1)
    D = sum_X + (sum_X.reshape([-1, 1]) - 2 * np.dot(X, X.T))

    idx = (1 - np.eye(n)).astype(bool)
    D = D[idx].reshape([n, -1])

    def generator():
        for i in xrange(n):
            yield i, D[i], tol, logU

    pool = mp.Pool(n_jobs)
    result = pool.map(x2p_job, generator())
    P = np.zeros([n, n])
    for i, thisP in result:
        P[i, idx[i]] = thisP

    return P
Project: AdaptationSeg    Author: YangZhang4065
def SP_pixelwise_loss(y_true,y_pred):
    y_true_label=y_true[:,:class_number,:,:]
    y_true_SP_weight=y_true[:,class_number:,:,:]

    y_pred=K.clip(y_pred,-50.,50.)#prevent overflow
    sample_num_per_class=K.sum(y_true_label,axis=[2,3],keepdims=True)
    class_ind=K.cast(K.greater(sample_num_per_class,0.),'float32')
    avg_sample_num_per_class=K.sum(sample_num_per_class,axis=1,keepdims=True)/K.sum(class_ind,axis=1,keepdims=True)
    sample_weight_per_class=avg_sample_num_per_class/(sample_num_per_class+0.1)
    exp_pred=K.exp(y_pred-K.max(y_pred,axis=1,keepdims=True))
    y_pred_softmax=exp_pred/K.sum(exp_pred,axis=1,keepdims=True)
    pixel_wise_loss=-K.log(y_pred_softmax)*y_true_label
    pixel_wise_loss=pixel_wise_loss*sample_weight_per_class
    weighter_pixel_wise_loss=K.sum(pixel_wise_loss,axis=1,keepdims=True)

    return K.mean(weighter_pixel_wise_loss*y_true_SP_weight)

#label distribution loss
Project: AdaptationSeg    Author: YangZhang4065
def layout_loss_hard(y_true,y_pred):

    y_pred=K.clip(y_pred,-50.,50.)#prevent overflow
    exp_pred=K.exp(y_pred-K.max(y_pred,axis=1,keepdims=True))
    y_pred_softmax=exp_pred/K.sum(exp_pred,axis=1,keepdims=True)

    max_pred_softmax=K.max(y_pred_softmax,axis=1,keepdims=True)
    bin_pred_softmax_a=y_pred_softmax/max_pred_softmax
    bin_pred_softmax=bin_pred_softmax_a**6.

    final_pred=K.mean(bin_pred_softmax,axis=[2,3])
    final_pred=final_pred/(K.sum(final_pred,axis=1,keepdims=True)+K.epsilon())
    y_true_s=K.squeeze(y_true,axis=3)
    y_true_s=K.squeeze(y_true_s,axis=2)
    tier_wise_loss_v=-K.clip(K.log(final_pred),-500,500)*y_true_s
    return K.mean(K.sum(tier_wise_loss_v,axis=1))


#compile
Project: rna_protein_binding    Author: wentaozhu
def custom_objective(y_true, y_pred):
    #prediction = Flatten(name='flatten')(dense_3)
    #prediction = ReRank(k=k, label=1, name='output')(prediction)
    #prediction = SoftReRank(softmink=softmink, softmaxk=softmaxk, label=1, name='output')(prediction)
    '''Just another crossentropy'''
    #y_true = K.clip(y_true, _EPSILON, 1.0-_EPSILON)
    y_true = K.max(y_true)
    #y_armax_index = numpy.argmax(y_pred)
    y_new = K.max(y_pred)
    #y_new = max(y_pred)
    '''
    if y_new >= 0.5:
        y_new_label = 1
    else:
        y_new_label = 0
    cce = abs(y_true - y_new_label)
    '''
    logEps=1e-8
    cce = - (y_true * K.log(y_new+logEps) + (1 - y_true)* K.log(1-y_new + logEps))
    return cce
Project: Keras-GAN    Author: Shaofanl
def register(self, info_tensor, param_tensor):
        self.info_tensor = info_tensor #(128,1)

        if self.stddev_fix:
            self.param_tensor = param_tensor

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
            std  = 1.0
        else:
            self.param_tensor = param_tensor # 2 

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
          # std  = K.maximum( param_tensor[:, 1].dimshuffle(0, 'x'), 0)
            std  = K.sigmoid( param_tensor[:, 1].dimshuffle(0, 'x') )

        e = (info_tensor-mean)/(std + K.epsilon())
        self.log_Q_c_given_x = \
            K.sum(-0.5*np.log(2*np.pi) -K.log(std+K.epsilon()) -0.5*(e**2), axis=1) * self.lmbd

#       m = Sequential([ Activation('softmax', input_shape=(self.n,)), Lambda(lambda x: K.log(x), lambda x: x) ])
        return K.reshape(self.log_Q_c_given_x, (-1, 1))
Project: FingerNet    Author: felixTY
def ori_loss(y_true, y_pred, lamb=1.):
    # clip
    y_pred = K.tf.clip_by_value(y_pred, K.epsilon(), 1 - K.epsilon())
    # get ROI
    label_seg = K.sum(y_true, axis=-1, keepdims=True)
    label_seg = K.tf.cast(K.tf.greater(label_seg, 0), K.tf.float32) 
    # weighted cross entropy loss
    lamb_pos, lamb_neg = 1., 1. 
    logloss = lamb_pos*y_true*K.log(y_pred)+lamb_neg*(1-y_true)*K.log(1-y_pred)
    logloss = logloss*label_seg # apply ROI
    logloss = -K.sum(logloss) / (K.sum(label_seg) + K.epsilon())
    # coherence loss, nearby ori should be as near as possible
    mean_kernal = np.reshape(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32)/8, [3, 3, 1, 1])    
    sin2angle_ori, cos2angle_ori, modulus_ori = ori2angle(y_pred)
    sin2angle = K.conv2d(sin2angle_ori, mean_kernal, padding='same')
    cos2angle = K.conv2d(cos2angle_ori, mean_kernal, padding='same')
    modulus = K.conv2d(modulus_ori, mean_kernal, padding='same')
    coherence = K.sqrt(K.square(sin2angle) + K.square(cos2angle)) / (modulus + K.epsilon())
    coherenceloss = K.sum(label_seg) / (K.sum(coherence*label_seg) + K.epsilon()) - 1
    loss = logloss + lamb*coherenceloss
    return loss
Project: FingerNet    Author: felixTY
def mnt_s_loss(y_true, y_pred):
    # clip
    y_pred = K.tf.clip_by_value(y_pred, K.epsilon(), 1 - K.epsilon())
    # get ROI
    label_seg = K.tf.cast(K.tf.not_equal(y_true, 0.0), K.tf.float32) 
    y_true = K.tf.where(K.tf.less(y_true,0.0), K.tf.zeros_like(y_true), y_true) # set -1 -> 0
    # weighted cross entropy loss       
    total_elements = K.sum(label_seg) + K.epsilon()  
    lamb_pos, lamb_neg = 10., .5
    logloss = lamb_pos*y_true*K.log(y_pred)+lamb_neg*(1-y_true)*K.log(1-y_pred)
    # apply ROI
    logloss = logloss*label_seg
    logloss = -K.sum(logloss) / total_elements
    return logloss    

# find highest peak using gaussian
Project: DeepIV    Author: jhartford
def mix_gaussian_loss(x, mu, log_sig, w):
    '''
    Combine the mixture-of-Gaussians distribution and the loss into a single function
    so that we can use the log-sum-exp trick for numerical stability.
    '''
    if K.backend() == "tensorflow":
        x.set_shape([None, 1])
    gauss = log_norm_pdf(K.repeat_elements(x=x, rep=mu.shape[1], axis=1), mu, log_sig)
    # TODO: get rid of clipping.
    gauss = K.clip(gauss, -40, 40)
    max_gauss = K.maximum((0.), K.max(gauss))
    # log sum exp trick...
    gauss = gauss - max_gauss
    out = K.sum(w * K.exp(gauss), axis=1)
    loss = K.mean(-K.log(out) + max_gauss)
    return loss
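A quick NumPy illustration of the log-sum-exp trick the docstring mentions: subtracting the maximum before exponentiating avoids overflow without changing the result.

a = np.array([1000.0, 1000.5])
naive = np.log(np.sum(np.exp(a)))           # overflows to inf
m = np.max(a)
stable = m + np.log(np.sum(np.exp(a - m)))  # ~1000.974, finite and exact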
Project: DeepJet    Author: mstoye
def loss_logcosh(y_true, x):
    """
    This loss implements a logcosh loss with a dummy for the uncertainty.
    It approximates a mean-squared loss for small differences and a linear one for
    large differences, and is therefore conceptually similar to the Huber loss.
    This loss is scaled such that it starts to become linear around 4-5 sigma.
    """
    scalefactor_a=30
    scalefactor_b=0.4

    from tensorflow import where, greater, abs, zeros_like, exp

    x_pred = x[:,1:]
    x_sig = x[:,:1]
    def cosh(y):
        return (K.exp(y) + K.exp(-y)) / 2

    return K.mean(0.5*K.square(x_sig))   + K.mean(scalefactor_a* K.log(cosh( scalefactor_b*(x_pred - y_true))), axis=-1)
Project: DeepJet    Author: mstoye
def loss_logcosh_noUnc(y_true, x_pred):
    """
    This loss implements a logcosh loss without a dummy for the uncertainty.
    It approximates a mean-squared loss for small differences and a linear one for
    large differences, and is therefore conceptually similar to the Huber loss.
    This loss is scaled such that it starts to become linear around 4-5 sigma.
    """
    scalefactor_a=1.
    scalefactor_b=3.

    from tensorflow import where, greater, abs, zeros_like, exp

    dxrel=(x_pred - y_true)/(y_true+0.0001)
    def cosh(x):
        return (K.exp(x) + K.exp(-x)) / 2

    return scalefactor_a*K.mean( K.log(cosh(scalefactor_b*dxrel)), axis=-1)
Project: DeepJet    Author: mstoye
def mean_log_Gaussian_like(y_true, parameters):
    """Mean Log Gaussian Likelihood distribution
    Note: the 'c' variable is obtained as a global variable
    """

    #Note: The output size will be (c + 2) * m = 6
    c = 1 #The number of outputs we want to predict
    m = 2 #The number of distributions we want to use in the mixture
    components = K.reshape(parameters,[-1, c + 2, m])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha,1e-8,1.))

    exponent = K.log(alpha) - .5 * float(c) * K.log(2 * np.pi) \
    - float(c) * K.log(sigma) \
    - K.sum((K.expand_dims(y_true,2) - mu)**2, axis=1)/(2*(sigma)**2)

    log_gauss = log_sum_exp(exponent, axis=1)
    res = - K.mean(log_gauss)
    return res
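The log_sum_exp helper is imported from elsewhere in DeepJet; a typical numerically stable backend implementation (an assumption, following the same max-subtraction pattern as the logsumexp snippets from emnlp2017-bilstm-cnn-crf further down this page) would be:

def log_sum_exp(x, axis=None):
    # Hypothetical helper: stable log(sum(exp(x), axis)) via max subtraction
    x_max = K.max(x, axis=axis, keepdims=True)
    return K.max(x, axis=axis) + K.log(K.sum(K.exp(x - x_max), axis=axis))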
Project: loss-correction    Author: giorgiop
def robust(name, P):

    if name == 'backward':
        P_inv = K.constant(np.linalg.inv(P))

        def loss(y_true, y_pred):
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
            return -K.sum(K.dot(y_true, P_inv) * K.log(y_pred), axis=-1)

    elif name == 'forward':
        P = K.constant(P)

        def loss(y_true, y_pred):
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
            return -K.sum(y_true * K.log(K.dot(y_pred, P)), axis=-1)

    return loss
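A usage sketch with a hypothetical 2-class noise-transition matrix P, where P[i, j] is the probability that a clean label i is observed as label j (the model name is also hypothetical):

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
backward_loss = robust('backward', P)  # multiplies y_true by P^-1
forward_loss = robust('forward', P)    # pushes y_pred through P
model.compile(optimizer='adam', loss=forward_loss)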
Project: nonlinearIB    Author: artemyk
def kde_entropy(output, var):
    # Kernel density estimate of entropy, in nats

    dims = K.cast(K.shape(output)[1], K.floatx() ) 
    N    = K.cast(K.shape(output)[0], K.floatx() )

    normconst = (dims/2.0)*K.log(2*np.pi*var)

    # get dists matrix
    x2 = K.expand_dims(K.sum(K.square(output), axis=1), 1)
    dists = x2 + K.transpose(x2) - 2*K.dot(output, K.transpose(output))
    dists = dists / (2*var)

    lprobs = logsumexp(-dists, axis=1) - K.log(N) - normconst
    h = -K.mean(lprobs)

    return h
Project: value_gradient    Author: rarilurelo
def optimize_pi(self, batch):
        if not self.built:
            self.build()
        sampled_action_for_M = self.sess.run(self.sampled_action_for_M, {self.states: batch['states']})
        sampled_action = np.transpose(sampled_action_for_M, (1, 0, 2))[:, :, np.newaxis, :]
        pairwise_d = np.sum((np.tile(sampled_action, (self.M_pi, 1)) - \
            np.transpose(np.tile(sampled_action, (self.M_pi, 1)), (0, 2, 1, 3)))**2, axis=3).reshape(sampled_action.shape[0], -1)
        d = np.median(pairwise_d, axis=1)
        h = d/(2*np.log(self.M_pi+1))
        feed_in = {
                self.states: batch['states'],
                self.actions: batch['actions'],
                self.sampled_action_feeder: sampled_action_for_M,
                self.h: h,
                }
        self.sess.run(self.pi_updater, feed_in)
Project: segmentation_DLMI    Author: imatge-upc
def categorical_crossentropy_3d_SW(y_true_sw, y_predicted):
    """
    Computes the categorical cross-entropy loss for a softmax distribution over a one-hot-encoded 3D array
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true_sw : keras.placeholder [batches, dim0, dim1, dim2, channels + weights]
        Placeholder holding the one-hot ground-truth labels concatenated with per-voxel sample weights
        along the last axis
    y_predicted : keras.placeholder [batches, dim0, dim1, dim2, channels]
        Placeholder holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    sw = y_true_sw[:,:,:,:,K.int_shape(y_predicted)[-1]:]
    y_true = y_true_sw[:,:,:,:,:K.int_shape(y_predicted)[-1]]

    y_true_flatten = K.flatten(y_true*sw)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(y_true_flatten)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
Project: segmentation_DLMI    Author: imatge-upc
def categorical_crossentropy_3d_masked(vectors):
    """
    Computes the categorical cross-entropy loss for a softmax distribution over a one-hot-encoded 3D array
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    vectors : tuple (y_predicted, mask, y_true)
        y_predicted : placeholder holding the softmax distribution over classes
        mask : placeholder holding the binary mask used to normalize the loss
        y_true : placeholder holding the ground-truth labels in a one-hot representation

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """

    y_predicted, mask, y_true = vectors

    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(mask)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
Project: segmentation_DLMI    Author: imatge-upc
def categorical_crossentropy_3d_lambda(vectors):
    y_true, y_pred = vectors

    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_pred)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())

    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (K.sum(y_true) + K.epsilon())
    return mean_cross_entropy
Project: nn_playground    Author: DingKe
def focal_loss(target, output, gamma=2):
    output /= K.sum(output, axis=-1, keepdims=True)
    eps = K.epsilon()
    output = K.clip(output, eps, 1. - eps)
    return -K.sum(K.pow(1. - output, gamma) * target * K.log(output),
                  axis=-1)
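Because gamma has a default value, focal_loss already matches Keras's (y_true, y_pred) loss signature and can be passed to compile directly (the model name is hypothetical):

model.compile(optimizer='adam', loss=focal_loss)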
Project: latplan    Author: guicho271828
def anneal_rate(epoch,min=0.1,max=5.0):
    import math
    return math.log(max/min) / epoch
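A worked example of the rate this returns, assuming an annealing schedule of the form tau = max * exp(-rate * epoch): with the defaults, anneal_rate(100) = log(5.0 / 0.1) / 100 ≈ 0.0391, which decays the temperature from 5.0 to exactly 0.1 at epoch 100.

import math
rate = anneal_rate(100)            # log(50) / 100 ~= 0.0391
tau = 5.0 * math.exp(-rate * 100)  # ~= 0.1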
Project: latplan    Author: guicho271828
def call(self,logits):
        u = K.random_uniform(K.shape(logits), 0, 1)
        gumbel = - K.log(-K.log(u + 1e-20) + 1e-20)
        return K.in_train_phase(
            K.softmax( ( logits + gumbel ) / self.tau ),
            K.softmax( ( logits + gumbel ) / self.min ))
Project: latplan    Author: guicho271828
def loss(self):
        logits = self.logits
        q = K.softmax(logits)
        log_q = K.log(q + 1e-20)
        return - K.mean(q * (log_q - K.log(1.0/K.int_shape(logits)[-1])),
                        axis=tuple(range(1,len(K.int_shape(logits)))))
Project: enet-keras    Author: PavlosMelissinos
def w_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5, 2, 10]) # class one weighted 0.5x, class two 2x, class three 10x
        loss = w_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')

    Credit to:
    @wassname (github)
    https://gist.github.com/wassname/ce364fddfc8a025bfab4348cf5de852d
    """

    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss
Project: TC-Lung_nodules_detection    Author: Shicoder
def dice_coef_loss3(y_true, y_pred):
    return -K.log(dice_coef(y_true, y_pred))
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab
def logsumexp(x, axis=None):
        '''Returns `log(sum(exp(x), axis=axis))` with improved numerical stability.
        '''
        return tf.reduce_logsumexp(x, axis=axis)
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab
def logsumexp(x, axis=None):
        '''Returns `log(sum(exp(x), axis=axis))` with improved numerical stability.
        '''
        xmax = K.max(x, axis=axis, keepdims=True)
        xmax_ = K.max(x, axis=axis)
        return xmax_ + K.log(K.sum(K.exp(x - xmax), axis=axis))
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab
def sparse_chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Given the true sparsely encoded tag sequence y, input x (with mask),
    transition energies U, boundary energies b_start and b_end, it computes
    the loss function of a Linear Chain Conditional Random Field:
    loss(y, x) = NLL(P(y|x)), where P(y|x) = exp(E(y, x)) / Z.
    So, loss(y, x) = - E(y, x) + log(Z).
    Here, E(y, x) is the tag path energy, and Z is the normalization constant.
    The value log(Z) is also called the free energy.
    '''
    x = add_boundary_energy(x, b_start, b_end, mask)
    energy = path_energy0(y, x, U, mask)
    energy -= free_energy0(x, U, mask)
    return K.expand_dims(-energy, -1)
Project: pCVR    Author: xjtushilei
def my_logloss(act, pred):
    epsilon = 1e-15
    pred = K.maximum(epsilon, pred)
    pred = K.minimum(1 - epsilon, pred)
    ll = K.sum(act * K.log(pred) + (1 - act) * K.log(1 - pred))
    ll = ll * -1.0 / K.shape(act)[0]

    return ll
Project: pCVR    Author: xjtushilei
def logloss(act, pred):
    '''
    Log loss (binary cross-entropy), computed with SciPy
    :param act: ground-truth labels
    :param pred: predicted probabilities
    :return: mean log loss
    '''
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
Project: keras-tf-Super-Resolution    Author: olgaliak
def PSNRLoss(y_true, y_pred):
    """
    PSNR (Peak Signal-to-Noise Ratio) is a log-scale quality measure derived from the mean squared error.

    It can be calculated as
    PSNR = 20 * log10(MAXp) - 10 * log10(MSE)

    When providing an unscaled input, MAXp = 255, so 20 * log10(255) == 48.1308036087.
    However, since we are scaling our input, MAXp = 1 and 20 * log10(1) = 0.
    Thus we drop that component entirely and only compute the remaining MSE component.
    """
    return -10.0 * K.log(1.0 / (K.mean(K.square(y_pred - y_true)))) / K.log(10.0)
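A worked check (NumPy): for MSE = 0.01 on [0, 1]-scaled images, PSNR = -10 * log10(0.01) = 20 dB, so the function returns -20; minimizing this loss maximizes PSNR.

mse = 0.01
psnr = -10.0 * np.log10(mse)   # 20.0 dB
loss_value = -psnr             # -20.0, what PSNRLoss returns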
Project: medaka    Author: nanoporetech
def qscore(y_true, y_pred):
    error = K.cast(K.not_equal(
        K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1), K.floatx())),
        K.floatx()
    )
    error = K.sum(error) / K.sum(K.ones_like(error))
    return -10.0 * 0.434294481 * K.log(error)
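Since 0.434294481 ≈ log10(e), the returned expression equals -10 * log10(error rate), i.e. a Phred-style Q-score; for example, an error rate of 0.001 maps to Q30.

q = -10.0 * np.log10(0.001)    # 30.0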
Project: Deep-Learning-with-Theano    Author: PacktPublishing
def policy_loss(advantage=0., beta=0.01):
    def loss(y_true, y_pred):
        return -K.sum(K.log(K.sum(y_true * y_pred, axis=-1) + K.epsilon()) * K.flatten(advantage)) + \
               beta * K.sum(y_pred * K.log(y_pred + K.epsilon()))
    return loss
Project: pmet    Author: bkj
def lifted_loss(margin=1):
    """
      Lifted loss, per "Deep Metric Learning via Lifted Structured Feature Embedding" by Song et al
      Implemented in `keras`

      See also the `pytorch` implementation at: https://gist.github.com/bkj/565c5e145786cfd362cffdbd8c089cf4
    """
    def f(target, score):

        # Compute mask (-1 for different class, 1 for same class, 0 for diagonal)
        mask = (2 * K.equal(0, target - K.reshape(target, (-1, 1))) - 1)
        mask = (mask - K.eye(score.shape[0]))

        # Compute distance between rows
        mag  = (score ** 2).sum(axis=-1)
        mag  = K.tile(mag, (mag.shape[0], 1))
        dist = (mag + mag.T - 2 * score.dot(score.T))
        dist = K.sqrt(K.maximum(0, dist))

        # Negative component (points from different class should be far)
        l_n = K.sum((K.exp(margin - dist) * K.equal(mask, -1)), axis=-1)
        l_n = K.tile(l_n, (score.shape[0], 1))
        l_n = K.log(l_n + K.transpose(l_n))
        l_n = l_n * K.equal(mask, 1)

        # Positive component (points from same class should be close)
        l_p = dist * K.equal(mask, 1)

        loss  = K.sum((K.maximum(0, l_n + l_p) ** 2))
        n_pos = K.sum(K.equal(mask, 1))
        loss /= (2 * n_pos)

        return loss

    return f

# --
Project: Hotpot    Author: Liang-Qiu
def ranknet(y_true, y_pred):
    """ Bipartite ranking surrogate """
    return K.mean(K.log(1. + K.exp(-(y_true * y_pred - (1-y_true) * y_pred))), axis=-1)
Project: Hotpot    Author: Liang-Qiu
def cicerons_1504(y_true, y_pred):
    """ Bipartite ranking surrogate - http://arxiv.org/pdf/1504.06580v2.pdf """
    return K.mean(K.log(1. + K.exp(2*(2.5 - y_true*y_pred))) +
                  K.log(1. + K.exp(2*(0.5 + (1-y_true)*y_pred))), axis=-1)
Project: Hotpot    Author: Liang-Qiu
def ranknet(y_true, y_pred):
    """ Bipartite ranking surrogate """
    return K.mean(K.log(1. + K.exp(-(y_true * y_pred - (1-y_true) * y_pred))), axis=-1)
Project: Hotpot    Author: Liang-Qiu
def cicerons_1504(y_true, y_pred):
    """ Bipartite ranking surrogate - http://arxiv.org/pdf/1504.06580v2.pdf """
    return K.mean(K.log(1. + K.exp(2*(2.5 - y_true*y_pred))) +
                  K.log(1. + K.exp(2*(0.5 + (1-y_true)*y_pred))), axis=-1)
Project: single_shot_multibox_detector    Author: oarriaga
def cross_entropy(self, y_true, y_pred):
        y_pred /= tf.reduce_sum(y_pred, axis=-1, keep_dims=True)
        y_pred = K.maximum(K.minimum(y_pred, 1 - 1e-15), 1e-15)
        cross_entropy_loss = - K.sum(y_true * K.log(y_pred), axis=-1)
        return cross_entropy_loss
Project: single_shot_multibox_detector    Author: oarriaga
def _softmax_loss(self, y_true, y_pred):
        y_pred = K.maximum(K.minimum(y_pred, 1 - 1e-15), 1e-15)
        softmax_loss = - K.sum(y_true * K.log(y_pred), axis=-1)
        return softmax_loss
Project: keras-mdn    Author: yanji84
def get_lossfunc(out_pi, out_sigma, out_mu, y):
    result = tf_normal(y, out_mu, out_sigma)
    result = result * out_pi
    result = K.sum(result, axis=1, keepdims=True)
    result = -K.log(result + 1e-8)
    return K.mean(result)
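tf_normal comes from the surrounding keras-mdn project; a typical Gaussian pdf written in backend ops (an assumption, not the project's verbatim code) would be:

def tf_normal(y, mu, sigma):
    # Hypothetical helper: N(y | mu, sigma) evaluated elementwise
    one_over_sqrt_2pi = 1.0 / np.sqrt(2.0 * np.pi)
    z = (y - mu) / (sigma + 1e-8)
    return (one_over_sqrt_2pi / (sigma + 1e-8)) * K.exp(-0.5 * K.square(z))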
Project: NNProject_DeepMask    Author: abbypa
def binary_regression_error(y_true, y_pred):
    return score_output_lambda * K.log(1 + K.exp(-y_true*y_pred))
Project: NNProject_DeepMask    Author: abbypa
def mask_binary_regression_error(y_true, y_pred):
    # upper-left corner is -1 (background, i.e. a legal centered mask): multiply by 1
    # upper-left corner is 1 (illegal centered mask): return 0
    return seg_output_lambda * 0.5 * (1 - y_true[0][0][0]) * K.mean(K.log(1 + K.exp(-y_true*y_pred)))
Project: Generative-models    Author: aalitaiga
def vae_loss(x_, x_reconstruct):
    rec_loss = binary_crossentropy(x_, x_reconstruct)
    kl_loss = - 0.5 * K.mean(1 + 2*K.log(z_std + 1e-10) - z_mean**2 - z_std**2, axis=-1)
    return rec_loss + kl_loss
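The kl_loss term is the closed-form KL divergence between the approximate posterior N(z_mean, z_std^2) and the standard normal prior, KL = -0.5 * (1 + log(z_std^2) - z_mean^2 - z_std^2) per latent dimension; a quick NumPy check with hypothetical values:

z_mean, z_std = 0.5, 2.0
kl = -0.5 * (1 + 2 * np.log(z_std) - z_mean**2 - z_std**2)  # ~= 0.932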
Project: foolbox    Author: bethgelab
def _to_logits(self, predictions):
        from keras import backend as K
        eps = 10e-8
        predictions = K.clip(predictions, eps, 1 - eps)
        predictions = K.log(predictions)
        return predictions