Python keras.backend module: clip() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.clip().
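
Before diving into the project examples, here is a minimal sketch of our own (the variable names below are ours, not taken from any of the listed projects) showing what keras.backend.clip() does: it clamps every element of a tensor into the closed interval [min_value, max_value].

import numpy as np
from keras import backend as K

x = K.variable(np.array([-0.5, 0.2, 1.7]))
clipped = K.clip(x, 0., 1.)   # elementwise clamp into [0, 1]
print(K.eval(clipped))        # approximately [0.  0.2  1. ]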

Project: ensemble-adv-training    Author: ftramer
def symbolic_fgs(x, grad, eps=0.3, clipping=True):
    """
    FGSM attack.
    """

    # signed gradient
    normed_grad = K.sign(grad)

    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = K.stop_gradient(x + scaled_grad)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)
    return adv_x
Project: latplan    Author: guicho271828
def to_configs(states, verbose=True, **kwargs):
    base = setting['base']
    width  = states.shape[1] // base
    height = states.shape[1] // base
    load(width,height)

    def build():
        P = len(setting['panels'])
        states = Input(shape=(height*base,width*base))
        error = build_error(states, height, width, base)

        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        # a, h, w, panel
        matches = K.reshape(matches, [K.shape(states)[0], height * width, -1])
        # a, pos, panel
        matches = K.permute_dimensions(matches, [0,2,1])
        # a, panel, pos
        config = matches * K.arange(height*width,dtype='float')
        config = K.sum(config, axis=-1)
        return Model(states, wrap(states, config))

    model = build()
    return model.predict(states, **kwargs)
Project: latplan    Author: guicho271828
def to_configs(states, verbose=True, **kwargs):
    base = panels.shape[1]
    dim  = states.shape[1] - pad*2
    size = dim // base

    def build():
        states = Input(shape=(dim+2*pad,dim+2*pad))
        s = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **unswirl_args)
        error = build_errors(s,base,pad,dim,size)
        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        # a, h, w, panel
        matches = K.reshape(matches, [K.shape(states)[0], size * size, -1])
        # a, pos, panel
        config = matches * K.arange(2,dtype='float')
        config = K.sum(config, axis=-1)
        # these are 0/1 configs; for compatibility, we need -1 and 1
        config = - (config - 0.5)*2
        return Model(states, wrap(states, K.round(config)))

    return build().predict(states, **kwargs)
Project: AdaptationSeg    Author: YangZhang4065
def SP_pixelwise_loss(y_true,y_pred):
    y_true_label=y_true[:,:class_number,:,:]
    y_true_SP_weight=y_true[:,class_number:,:,:]

    y_pred=K.clip(y_pred,-50.,50.)#prevent overflow
    sample_num_per_class=K.sum(y_true_label,axis=[2,3],keepdims=True)
    class_ind=K.cast(K.greater(sample_num_per_class,0.),'float32')
    avg_sample_num_per_class=K.sum(sample_num_per_class,axis=1,keepdims=True)/K.sum(class_ind,axis=1,keepdims=True)
    sample_weight_per_class=avg_sample_num_per_class/(sample_num_per_class+0.1)
    exp_pred=K.exp(y_pred-K.max(y_pred,axis=1,keepdims=True))
    y_pred_softmax=exp_pred/K.sum(exp_pred,axis=1,keepdims=True)
    pixel_wise_loss=-K.log(y_pred_softmax)*y_true_label
    pixel_wise_loss=pixel_wise_loss*sample_weight_per_class
    weighter_pixel_wise_loss=K.sum(pixel_wise_loss,axis=1,keepdims=True)

    return K.mean(weighter_pixel_wise_loss*y_true_SP_weight)

#label distribution loss
Project: AdaptationSeg    Author: YangZhang4065
def layout_loss_hard(y_true,y_pred):

    y_pred=K.clip(y_pred,-50.,50.)#prevent overflow
    exp_pred=K.exp(y_pred-K.max(y_pred,axis=1,keepdims=True))
    y_pred_softmax=exp_pred/K.sum(exp_pred,axis=1,keepdims=True)

    max_pred_softmax=K.max(y_pred_softmax,axis=1,keepdims=True)
    bin_pred_softmax_a=y_pred_softmax/max_pred_softmax
    bin_pred_softmax=bin_pred_softmax_a**6.

    final_pred=K.mean(bin_pred_softmax,axis=[2,3])
    final_pred=final_pred/(K.sum(final_pred,axis=1,keepdims=True)+K.epsilon())
    y_true_s=K.squeeze(y_true,axis=3)
    y_true_s=K.squeeze(y_true_s,axis=2)
    tier_wise_loss_v=-K.clip(K.log(final_pred),-500,500)*y_true_s
    return K.mean(K.sum(tier_wise_loss_v,axis=1))


#compile
Project: dsde-deep-learning    Author: broadinstitute
def per_class_recall(classes):

    def class_recall(y_true, y_pred):
        '''Calculates the per class recall
        '''
        recalls = {}
        true_positives = K.sum(K.round(K.clip(y_true*y_pred, 0, 1)), axis=0)
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)

        for i,c in enumerate(classes):
            recalls[c+'_RECALL'] = true_positives[i] / (possible_positives[i] + K.epsilon())

        return recalls
    return class_recall


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~ Plots ~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Project: rna_protein_binding    Author: wentaozhu
def custom_objective(y_true, y_pred):
    #prediction = Flatten(name='flatten')(dense_3)
    #prediction = ReRank(k=k, label=1, name='output')(prediction)
    #prediction = SoftReRank(softmink=softmink, softmaxk=softmaxk, label=1, name='output')(prediction)
    '''Just another crossentropy'''
    #y_true = K.clip(y_true, _EPSILON, 1.0-_EPSILON)
    y_true = K.max(y_true)
    #y_armax_index = numpy.argmax(y_pred)
    y_new = K.max(y_pred)
    #y_new = max(y_pred)
    '''
    if y_new >= 0.5:
        y_new_label = 1
    else:
        y_new_label = 0
    cce = abs(y_true - y_new_label)
    '''
    logEps=1e-8
    cce = - (y_true * K.log(y_new+logEps) + (1 - y_true)* K.log(1-y_new + logEps))
    return cce
Project: kaggle_amazon    Author: asanakoy
def fscore(y_true, y_pred, average='samples', beta=2):
    sum_axis = 1 if average == 'samples' else 0

    # calculate weighted counts
    true_and_pred = K.round(K.clip(y_true * y_pred, 0, 1))
    tp_sum = K.sum(true_and_pred, axis=sum_axis)
    pred_sum = K.sum(y_pred, axis=sum_axis)
    true_sum = K.sum(y_true, axis=sum_axis)

    beta2 = beta ** 2

    precision = tp_sum / (pred_sum + K.epsilon())
    recall = tp_sum / (true_sum + K.epsilon())

    f_score = ((1 + beta2) * precision * recall /
               (beta2 * precision + recall + K.epsilon()))
    # f_score[tp_sum == 0] = 0.0
    # f_score = K.switch(K.equal(f_score, 0.0), 0.0, f_score)
    return K.mean(f_score)
Project: Keras-GAN    Author: Shaofanl
def register(self, info_tensor, param_tensor):
        self.info_tensor = info_tensor #(128,1)

        if self.stddev_fix:
            self.param_tensor = param_tensor

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
            std  = 1.0
        else:
            self.param_tensor = param_tensor # 2 

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
          # std  = K.maximum( param_tensor[:, 1].dimshuffle(0, 'x'), 0)
            std  = K.sigmoid( param_tensor[:, 1].dimshuffle(0, 'x') )

        e = (info_tensor-mean)/(std + K.epsilon())
        self.log_Q_c_given_x = \
            K.sum(-0.5*np.log(2*np.pi) -K.log(std+K.epsilon()) -0.5*(e**2), axis=1) * self.lmbd

#       m = Sequential([ Activation('softmax', input_shape=(self.n,)), Lambda(lambda x: K.log(x), lambda x: x) ])
        return K.reshape(self.log_Q_c_given_x, (-1, 1))
Project: baseline    Author: dpressel
def f1_score(y_true, y_pred):

    # Count positive samples.
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))

    # If there are no true samples, fix the F1 score at 0.
    if c3 == 0:
        return 0

    # How many selected items are relevant?
    precision = c1 / c2

    # How many relevant items are selected?
    recall = c1 / c3

    # Calculate f1_score
    f1_score = 2 * (precision * recall) / (precision + recall)
    return f1_score
Project: DeepIV    Author: jhartford
def get_gradients(self, loss, params):
    '''
    Replacement for the default keras get_gradients() function.
    Modification: checks if the object has the attribute grads and 
    returns that rather than calculating the gradients using automatic
    differentiation. 
    '''
    if hasattr(self, 'grads'):
        grads = self.grads
    else:
        grads = K.gradients(loss, params)
    if hasattr(self, 'clipnorm') and self.clipnorm > 0:
        norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
        grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
    if hasattr(self, 'clipvalue') and self.clipvalue > 0:
        grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
    return grads
Project: DeepIV    Author: jhartford
def mix_gaussian_loss(x, mu, log_sig, w):
    '''
    Combine the mixture of gaussian distribution and the loss into a single function
    so that we can do the log sum exp trick for numerical stability...
    '''
    if K.backend() == "tensorflow":
        x.set_shape([None, 1])
    gauss = log_norm_pdf(K.repeat_elements(x=x, rep=mu.shape[1], axis=1), mu, log_sig)
    # TODO: get rid of clipping.
    gauss = K.clip(gauss, -40, 40)
    max_gauss = K.maximum((0.), K.max(gauss))
    # log sum exp trick...
    gauss = gauss - max_gauss
    out = K.sum(w * K.exp(gauss), axis=1)
    loss = K.mean(-K.log(out) + max_gauss)
    return loss
Project: DeepJet    Author: mstoye
def huberishLoss_noUnc(y_true, x_pred):


    dxrel=(x_pred - y_true)/1#(K.clip(K.abs(y_true+0.1),K.epsilon(),None))
    dxrel=K.clip(dxrel,-1e6,1e6)

    #defines the inverse of starting point of the linear behaviour
    scaler=2

    dxabs=K.abs(scaler* dxrel)
    dxsq=K.square(scaler * dxrel)
    dxp4=K.square(dxsq)

    lossval=dxsq / (1+dxp4) + (2*dxabs -1)/(1 + 1/dxp4)
    #K.clip(lossval,-1e6,1e6)

    return K.mean( lossval , axis=-1)
Project: DeepJet    Author: mstoye
def mean_log_Gaussian_like(y_true, parameters):
    """Mean Log Gaussian Likelihood distribution
    Note: The 'c' variable is obtained as a global variable
    """

    #Note: The output size will be (c + 2) * m = 6
    c = 1 #The number of outputs we want to predict
    m = 2 #The number of distributions we want to use in the mixture
    components = K.reshape(parameters,[-1, c + 2, m])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha,1e-8,1.))

    exponent = K.log(alpha) - .5 * float(c) * K.log(2 * np.pi) \
    - float(c) * K.log(sigma) \
    - K.sum((K.expand_dims(y_true,2) - mu)**2, axis=1)/(2*(sigma)**2)

    log_gauss = log_sum_exp(exponent, axis=1)
    res = - K.mean(log_gauss)
    return res
Project: DeepJet    Author: mstoye
def mean_log_LaPlace_like(y_true, parameters):
    """Mean Log Laplace Likelihood distribution
    Note: The 'c' variable is obtained as a global variable
    """
    #Note: The output size will be (c + 2) * m = 6
    c = 1 #The number of outputs we want to predict
    m = 2 #The number of distributions we want to use in the mixture
    components = K.reshape(parameters,[-1, c + 2, m])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha,1e-2,1.))

    exponent = K.log(alpha) - float(c) * K.log(2 * sigma) \
    - K.sum(K.abs(K.expand_dims(y_true,2) - mu), axis=1)/(sigma)

    log_gauss = log_sum_exp(exponent, axis=1)
    res = - K.mean(log_gauss)
    return res
Project: loss-correction    Author: giorgiop
def robust(name, P):

    if name == 'backward':
        P_inv = K.constant(np.linalg.inv(P))

        def loss(y_true, y_pred):
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
            return -K.sum(K.dot(y_true, P_inv) * K.log(y_pred), axis=-1)

    elif name == 'forward':
        P = K.constant(P)

        def loss(y_true, y_pred):
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
            return -K.sum(y_true * K.log(K.dot(y_pred, P)), axis=-1)

    return loss
Project: SNLI-Keras    Author: adamzjk
def precision(y_true, y_pred):
  y_true, y_pred = K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)
  y_true, y_pred = K.cast(y_true, 'float32'), K.cast(y_pred, 'float32')
  TP = K.sum(K.clip(y_true * y_pred, 0, 1)) # how many
  predicted_positives = K.sum(K.clip(y_pred, 0, 1))
  return TP / (predicted_positives + K.epsilon())
Project: SNLI-Keras    Author: adamzjk
def recall(y_true, y_pred):
  y_true, y_pred = K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)
  y_true, y_pred = K.cast(y_true, 'float32'), K.cast(y_pred, 'float32')
  TP = K.sum(K.clip(y_true * y_pred, 0, 1))  # how many
  # TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
  # possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
  possible_positives = K.sum(K.clip(y_true, 0, 1))
  return TP / (possible_positives + K.epsilon())
Project: SNLI-Keras    Author: adamzjk
def f1_score(y_true, y_pred):
  # If there are no true positives, fix the F score at 0 like sklearn.
  if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
      return 0
  p = precision(y_true, y_pred)
  r = recall(y_true, y_pred)
  fscore = 2 * (p * r) / (p + r + K.epsilon())
  return fscore
Project: nn_playground    Author: DingKe
def __call__(self, p):
        return K.clip(p, self.min_value, self.max_value)
Project: nn_playground    Author: DingKe
def _hard_sigmoid(x):
    '''Hard sigmoid that differs from the more conventional form (see the definition of K.hard_sigmoid).

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    x = (0.5 * x) + 0.5
    return K.clip(x, 0, 1)
Project: nn_playground    Author: DingKe
def focal_loss(target, output, gamma=2):
    output /= K.sum(output, axis=-1, keepdims=True)
    eps = K.epsilon()
    output = K.clip(output, eps, 1. - eps)
    return -K.sum(K.pow(1. - output, gamma) * target * K.log(output),
                  axis=-1)
Project: nn_playground    Author: DingKe
def __call__(self, p):
        return K.clip(p, self.min_value, self.max_value)
Project: nn_playground    Author: DingKe
def ternary_tanh(x):
    x = K.clip(x, -1, 1)
    return ternarize(x)
Project: nn_playground    Author: DingKe
def __call__(self, p):
        return K.clip(p, self.min_value, self.max_value)
Project: nn_playground    Author: DingKe
def _hard_sigmoid(x):
    '''Hard sigmoid that differs from the more conventional form (see the definition of K.hard_sigmoid).

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    x = (0.5 * x) + 0.5
    return K.clip(x, 0, 1)
Project: deeppavlov    Author: deepmipt
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision.
    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """

    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Project: deeppavlov    Author: deepmipt
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.
    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """

    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Project: deeppavlov    Author: deepmipt
def fbeta_score(y_true, y_pred, beta=1):
    """Computes the F score.

    The F score is the weighted harmonic mean of precision and recall.
    Here it is only computed as a batch-wise average, not globally.
    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranging from 0.0 to 1.0)
    computes this, as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    """

    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
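
For reference, the quantity returned above is the standard F-beta score built from the batch-wise precision $p$ and recall $r$:

$$F_\beta = \frac{(1+\beta^2)\,p\,r}{\beta^2\,p + r}$$

The code adds K.epsilon() to the denominator to avoid division by zero.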
Project: latplan    Author: guicho271828
def validate_states(states,verbose=True,**kwargs):
    base = panels.shape[1]
    dim  = states.shape[1] - pad*2
    size = dim // base

    def build():
        states = Input(shape=(dim+2*pad,dim+2*pad))
        s = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **unswirl_args)
        error = build_errors(s,base,pad,dim,size)
        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1,2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1,2))
        panels_nomatch   = K.any(K.equal(num_matches, 0), (1,2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1,2))

        validity = panels_ok

        if verbose:
            return Model(states,
                         [ wrap(states, x) for x in [panels_ng,
                                                     panels_nomatch,
                                                     panels_ambiguous,
                                                     validity]])
        else:
            return Model(states, wrap(states, validity))

    if verbose:
        panels_ng, panels_nomatch, panels_ambiguous, validity \
            = build().predict(states, **kwargs)
        print(np.count_nonzero(panels_ng),       "images have some panels which match either no panel or more than one panel, out of which")
        print(np.count_nonzero(panels_nomatch),  "images have some panels which are unlike any panels")
        print(np.count_nonzero(panels_ambiguous),"images have some panels which match more than one panel")
        print(np.count_nonzero(validity),        "images have panels (all of them) which match exactly 1 panel each")
        return validity
    else:
        validity \
            = build().predict(states, **kwargs)
        return validity
Project: latplan    Author: guicho271828
def validate_states(states,verbose=True,**kwargs):
    base = panels.shape[1]
    size = states.shape[1]//base
    dim  = states.shape[1]
    def build():
        states = Input(shape=(dim,dim))
        error = build_errors(states,base,dim,size)
        matches = 1 - K.clip(K.sign(error - threshold),0,1)

        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1,2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1,2))
        panels_nomatch   = K.any(K.equal(num_matches, 0), (1,2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1,2))

        validity = panels_ok

        if verbose:
            return Model(states,
                         [ wrap(states, x) for x in [panels_ng,
                                                     panels_nomatch,
                                                     panels_ambiguous,
                                                     validity]])
        else:
            return Model(states, wrap(states, validity))

    model = build()
    #     model.summary()
    if verbose:
        panels_ng, panels_nomatch, panels_ambiguous, validity = model.predict(states, **kwargs)
        print(np.count_nonzero(panels_ng),       "images have some panels which match either no panel or more than one panel, out of which")
        print(np.count_nonzero(panels_nomatch),  "images have some panels which are unlike any panels")
        print(np.count_nonzero(panels_ambiguous),"images have some panels which match more than one panel")
        print(np.count_nonzero(validity),        "images have panels (all of them) which match exactly 1 panel each")
        return validity
    else:
        validity = model.predict(states, **kwargs)
        return validity
Project: enet-keras    Author: PavlosMelissinos
def w_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = w_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')

    Credit to:
    @wassname (github)
    https://gist.github.com/wassname/ce364fddfc8a025bfab4348cf5de852d
    """

    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss
Project: importance-sampling    Author: idiap
def call(self, x):
        s, x1 = x
        a = x1[:, :1]
        s_hat = x1[:, 1:2]

        # Rescale the weights, making sure we mostly scale down
        a_hat = a * K.clip(s_hat / s, self.min_decrease, self.max_increase)

        # Scale again so that the reported loss is comparable to the other ones
        t = 1
        #sT = K.transpose(s)
        #t = K.dot(sT, a) / K.dot(sT, a_hat)

        return K.stop_gradient([a_hat * t])[0]
Project: importance-sampling    Author: idiap
def call(self, x):
        s, x1 = x
        a = x1[:, :1]
        s_hat = x1[:, 1:2]

        # Rescale the weights, making sure we mostly scale down
        a_hat = a * K.clip(s_hat / s, self.min_decrease, self.max_increase)

        # Scale again so that the reported loss is comparable to the other ones
        t = 1
        #sT = K.transpose(s)
        #t = K.dot(sT, a) / K.dot(sT, a_hat)

        return K.stop_gradient([a_hat * t])[0]
Project: importance-sampling    Author: idiap
def call(self, x):
        s, x1 = x
        a = x1[:, :1]
        s_hat = x1[:, 1:2]

        # Rescale the weights, making sure we mostly scale down
        a_hat = a * K.clip(s_hat / s, self.min_decrease, self.max_increase)

        # Scale again so that the reported loss is comparable to the other ones
        t = 1
        #sT = K.transpose(s)
        #t = K.dot(sT, a) / K.dot(sT, a_hat)

        return K.stop_gradient([a_hat * t])[0]
Project: importance-sampling    Author: idiap
def call(self, x):
        s, x1 = x
        a = x1[:, :1]
        s_hat = x1[:, 1:2]

        # Rescale the weights, making sure we mostly scale down
        a_hat = a * K.clip(s_hat / s, self.min_decrease, self.max_increase)

        # Scale again so that the reported loss is comparable to the other ones
        t = 1
        #sT = K.transpose(s)
        #t = K.dot(sT, a) / K.dot(sT, a_hat)

        return K.stop_gradient([a_hat * t])[0]
Project: kfs    Author: the-moliver
def call(self, inputs):
        W = self.kernel
        if self.tied_k:
            kX = self.k[0] * inputs
            kX = K.clip(kX, -30, 30)
            wekx = W[None, :, :] * K.exp(kX[:, :, None])
        else:
            kX = self.k[None, None, :, None, None] * inputs[:, :, None, :, :]
            kX = K.clip(kX, -30, 30)
            wekx = W[None, :, :, None, None] * K.exp(kX)
        output = K.sum(inputs[:, :, None, :, :] * wekx, axis=1) / (K.sum(wekx, axis=1) + K.epsilon())
        return output
Project: kfs    Author: the-moliver
def call(self, inputs, training=None):
        def noised():
            stddev = K.stop_gradient(K.sqrt(K.clip(self.factor * K.abs(inputs),
                                                   self.epsilon, None)))
            return inputs + K.random_normal(shape=K.shape(inputs),
                                            mean=0.0,
                                            stddev=stddev)
        return K.in_train_phase(noised, inputs, training=training)
Project: kfs    Author: the-moliver
def call(self, x, mask=None):
        # W = K.softplus(10.*self.kernel)/10.
        W = self.kernel
        if self.tied_k:
            kX = self.k[0] * x
            kX = K.clip(kX, -30, 30)
            wekx = W[None, :, :] * K.exp(kX[:, :, None])
        else:
            kX = self.k[None, None, :] * x[:, :, None]
            kX = K.clip(kX, -30, 30)
            wekx = W[None, :, :] * K.exp(kX)
        output = K.sum(x[:, :, None] * wekx, axis=1) / (K.sum(wekx, axis=1) + K.epsilon())
        return output
Project: keras_experiments    Author: avolkov1
def _get_tower_gradvars(self, loss, params):
        gdev_list = self._gdev_list

        # tower parallelization
        global_scope = tf.get_variable_scope()
        tower_gradvars = []
        for idev, device in enumerate(gdev_list):
            # tf.variable_scope('GPU_%i' % idev), \
            with tf.device(device), \
                    tf.variable_scope(global_scope, reuse=idev > 0), \
                    tf.name_scope('tower_%i' % idev):
                # tf.gradients returns list of `sum(dy/dx)`. The gradients
                # are aggregated by all_avg_gradients. Something doesn't seem
                # right though. SOMEWHAT SLOW.
                # TODO: Need to figure out how to efficiently aggregate.
                colo = True if not self._usenccl else not have_nccl
                # colo = True
                grads = tf.gradients(
                    loss, params,
                    # # GATE_NONE faster??
                    # gate_gradients=tf.train.Optimizer.GATE_NONE,
                    colocate_gradients_with_ops=colo)  # not have_nccl

                if hasattr(self, 'clipnorm') and self.clipnorm > 0:
                    norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
                    grads = [clip_norm(g, self.clipnorm, norm) for g in grads]

                if hasattr(self, 'clipvalue') and self.clipvalue > 0:
                    grads = [K.clip(g, -self.clipvalue, self.clipvalue)
                             for g in grads]

                gradvars = zip(grads, params)
                tower_gradvars.append(gradvars)

        tower_gradvars = all_avg_gradients(tower_gradvars, gdev_list,
                                           usenccl=self._usenccl)

        return tower_gradvars
Project: EUNN-theano    Author: iguanaus
def call(self, x, mask=None):
        x_abs = K.sqrt(self.epsilon + x**2 + x[:,self.swap_re_im]**2)
        if self.flag_clip:
            x_abs = K.clip(x_abs,self.clip_min,self.clip_max)
        rescale = K.tanh(x_abs)/(x_abs + self.epsilon)
        return rescale * x
Project: EUNN-theano    Author: iguanaus
def call(self, x, mask=None):
        x_abs = K.sqrt(self.epsilon + x**2 + x[:,self.swap_re_im]**2)
        x_abs = K.clip(x_abs,0.,1-3e-8)
        rescale = T.arctanh(x_abs)/(x_abs + self.epsilon)
        return rescale * x
Project: foolbox    Author: bethgelab
def _to_logits(self, predictions):
        from keras import backend as K
        eps = 10e-8
        predictions = K.clip(predictions, eps, 1 - eps)
        predictions = K.log(predictions)
        return predictions
Project: FCN_via_keras    Author: k3nt0w
def call(self, x, mask=None):
        e = K.exp(x - K.max(x, axis=1, keepdims=True))
        s = K.sum(e, axis=1, keepdims=True)
        return K.clip(e/s, 1e-7, 1)
Project: botcycle    Author: D2KLab
def f1_score(y_true, y_pred):
    from keras import backend as K
    import tensorflow as tf

    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=0)
    supports = K.sum(K.round(K.clip(y_true, 0, 1)), axis=0)
    predict_distr = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)
    precisions = true_positives / predict_distr
    recalls = true_positives / supports
    f1_scores = 2 * (precisions * recalls) / (precisions + recalls)
    # get 0 instead of NaN
    f1_scores = tf.where(tf.is_nan(f1_scores), tf.zeros_like(f1_scores), f1_scores)
    f1 = K.sum(f1_scores * supports) / K.sum(supports)
    return f1
Project: dsde-deep-learning    Author: broadinstitute
def precision(y_true, y_pred):
    '''Calculates the precision, a metric for multi-label classification of
    how many selected items are relevant.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Project: dsde-deep-learning    Author: broadinstitute
def recall(y_true, y_pred):
    '''Calculates the recall, a metric for multi-label classification of
    how many relevant items are selected.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Project: dsde-deep-learning    Author: broadinstitute
def per_class_precision(classes):

    def class_precision(y_true, y_pred):
        '''Calculates the per class precision
        '''
        precisions = {}
        true_positives = K.sum(K.round(K.clip(y_true*y_pred, 0, 1)), axis=0)
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=0)

        for i,c in enumerate(classes):
            precisions[c+'_PRECISION'] = true_positives[i] / (predicted_positives[i] + K.epsilon())

        return precisions
    return class_precision
Project: kaggle_amazon    Author: asanakoy
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision.

    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Project: kaggle_amazon    Author: asanakoy
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall