Python keras.backend module: abs() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.abs().
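Before the project excerpts, a minimal standalone sketch of the function itself (a sketch assuming a Keras install with the TensorFlow backend; the values are illustrative):

from keras import backend as K

x = K.variable([[-1.0, 2.0], [3.0, -4.0]])
y = K.abs(x)        # element-wise absolute value, same shape and dtype as x
print(K.eval(y))    # [[1. 2.] [3. 4.]]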

Project: quora_duplicate | Author: ijinmao
def distance_layer(x1, x2):
    """Distance and angle of two inputs.

    Computes the concatenation of the element-wise absolute difference
    and the element-wise product of the two inputs.

    """
    def _distance(args):
        x1 = args[0]
        x2 = args[1]
        x = K.abs(x1 - x2)
        return x

    def _multiply(args):
        x1 = args[0]
        x2 = args[1]
        return x1 * x2

    distance = Lambda(_distance, output_shape=(K.int_shape(x1)[-1],))([x1, x2])
    multiply = Lambda(_multiply, output_shape=(K.int_shape(x1)[-1],))([x1, x2])
    return concatenate([distance, multiply])
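A hedged usage sketch for the layer above (the input width of 128 is an arbitrary placeholder; the imports mirror what the excerpt assumes):

from keras import backend as K
from keras.layers import Input, Lambda, concatenate

x1 = Input(shape=(128,))             # hypothetical encoded inputs
x2 = Input(shape=(128,))
features = distance_layer(x1, x2)    # shape (None, 256): |x1 - x2| concatenated with x1 * x2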
Project: AerialCrackDetection_Keras | Author: TTMRonald
def rpn_loss_regr(num_anchors):
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])

    return rpn_loss_regr_fixed_num
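The expression x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5) is the smooth-L1 (Huber-style) term that recurs in all the Faster R-CNN ports below. A standalone numpy check (illustrative values) showing that the two branches meet at |x| = 1:

import numpy as np

x = np.array([-3.0, -1.0, -0.2, 0.0, 0.5, 1.0, 2.0])
x_abs = np.abs(x)
x_bool = (x_abs <= 1.0).astype(np.float64)
loss = x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5)
# quadratic inside |x| <= 1, linear outside; both branches equal 0.5 at |x| = 1
print(loss)   # [2.5  0.5  0.02  0.  0.125  0.5  1.5]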
Project: keras-frcnn | Author: yhenon
def rpn_loss_regr(num_anchors):
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])

    return rpn_loss_regr_fixed_num
Project: quora_duplicate | Author: ijinmao
def __call__(self, x1, x2):
        def _sub_ops(args):
            x1 = args[0]
            x2 = args[1]
            x = K.abs(x1 - x2)
            return x

        def _mult_ops(args):
            x1 = args[0]
            x2 = args[1]
            return x1 * x2

        output_shape = (self.sequence_length, self.input_dim,)
        sub = Lambda(_sub_ops, output_shape=output_shape)([x1, x2])
        mult = Lambda(_mult_ops, output_shape=output_shape)([x1, x2])
        sub = self.model(sub)
        mult = self.model(mult)
        return concatenate([sub, mult])
Project: Gene-prediction | Author: sriram2093
def rpn_loss_regr(num_anchors):
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
            #return K.sum(x_abs)
            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])

    return rpn_loss_regr_fixed_num
Project: FingerNet | Author: felixTY
def ori_acc_delta_k(y_true, y_pred, k=10, max_delta=180):
    # get ROI
    label_seg = K.sum(y_true, axis=-1)
    label_seg = K.tf.cast(K.tf.greater(label_seg, 0), K.tf.float32) 
    # get pred angle    
    angle = K.cast(K.argmax(ori_highest_peak(y_pred, max_delta), axis=-1), dtype=K.tf.float32)*2.0+1.0
    # get gt angle
    angle_t = K.cast(K.argmax(y_true, axis=-1), dtype=K.tf.float32)*2.0+1.0
    # get delta
    angle_delta = K.abs(angle_t - angle)
    acc = K.tf.less_equal(K.minimum(angle_delta, max_delta-angle_delta), k)
    acc = K.cast(acc, dtype=K.tf.float32)
    # apply ROI
    acc = acc*label_seg
    acc = K.sum(acc) / (K.sum(label_seg)+K.epsilon())
    return acc
Project: FingerNet | Author: felixTY
def ori_acc_delta_k(y_true, y_pred, k=10, max_delta=180):
    # get ROI
    label_seg = K.sum(y_true, axis=-1)
    label_seg = K.tf.cast(K.tf.greater(label_seg, 0), K.tf.float32) 
    # get pred angle    
    angle = K.cast(K.argmax(ori_highest_peak(y_pred, max_delta), axis=-1), dtype=K.tf.float32)*2.0+1.0
    # get gt angle
    angle_t = K.cast(K.argmax(y_true, axis=-1), dtype=K.tf.float32)*2.0+1.0
    # get delta
    angle_delta = K.abs(angle_t - angle)
    acc = K.tf.less_equal(K.minimum(angle_delta, max_delta-angle_delta), k)
    acc = K.cast(acc, dtype=K.tf.float32)
    # apply ROI
    acc = acc*label_seg
    acc = K.sum(acc) / (K.sum(label_seg)+K.epsilon())
    return acc
Project: extkeras | Author: andhus
def call(self, inputs, mask=None):
        t = inputs
        timegate = K.abs(self.timegate)
        period = timegate[0]
        shift = timegate[1]
        r_on = timegate[2]

        phi = ((t - shift) % period) / period
        # K.switch not consistent between Theano and Tensorflow backend,
        # so write explicitly.
        # TODO check if still the case
        up = K.cast(K.less(phi, r_on * 0.5), K.floatx()) * 2 * phi / r_on
        mid = (
            K.cast(K.less(phi, r_on), K.floatx()) *
            K.cast(K.greater(phi, r_on * 0.5), K.floatx()) *
            (2 - (2 * phi / r_on))
        )
        end = K.cast(K.greater_equal(phi, r_on), K.floatx()) * self.alpha * phi  # closed phase: small leak
        k = up + mid + end

        return k
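The three terms implement the Phased LSTM time gate: an opening ramp, a closing ramp, and a leaky closed phase. A standalone numpy sketch with illustrative parameter values:

import numpy as np

period, shift, r_on, alpha = 100.0, 0.0, 0.2, 1e-3   # illustrative values
t = np.array([2.0, 8.0, 15.0, 50.0])
phi = ((t - shift) % period) / period                            # position within the cycle
up = (phi < r_on / 2) * (2 * phi / r_on)                         # gate opening
mid = ((phi >= r_on / 2) & (phi < r_on)) * (2 - 2 * phi / r_on)  # gate closing
end = (phi >= r_on) * (alpha * phi)                              # closed phase: small leak
k = up + mid + end
print(k)   # 0.2, 0.8, 0.5, 0.0005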
Project: DeepJet | Author: mstoye
def huberishLoss_noUnc(y_true, x_pred):

    # plain difference; the commented-out denominator would make it relative to the truth
    dxrel = (x_pred - y_true) / 1  # (K.clip(K.abs(y_true + 0.1), K.epsilon(), None))
    dxrel = K.clip(dxrel, -1e6, 1e6)

    # 'scaler' is the inverse of the point where the loss starts behaving linearly
    scaler = 2

    dxabs = K.abs(scaler * dxrel)
    dxsq = K.square(scaler * dxrel)
    dxp4 = K.square(dxsq)

    # smooth blend: quadratic for small differences, linear for large ones
    lossval = dxsq / (1 + dxp4) + (2 * dxabs - 1) / (1 + 1 / dxp4)
    # K.clip(lossval, -1e6, 1e6)

    return K.mean(lossval, axis=-1)
Project: DeepJet | Author: mstoye
def loss_logcosh(y_true, x):
    """
    This loss implements a log-cosh loss with a dummy for the uncertainty.
    It approximates a mean-squared loss for small differences and a linear one for
    large differences, and is therefore conceptually similar to the Huber loss.
    The loss is scaled such that it starts becoming linear around 4-5 sigma.
    """
    scalefactor_a = 30
    scalefactor_b = 0.4

    x_pred = x[:, 1:]
    x_sig = x[:, :1]

    def cosh(y):
        return (K.exp(y) + K.exp(-y)) / 2

    return K.mean(0.5 * K.square(x_sig)) + K.mean(scalefactor_a * K.log(cosh(scalefactor_b * (x_pred - y_true))), axis=-1)
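As the docstring says, log(cosh(x)) grows like x**2 / 2 for small x and like |x| - log(2) for large x, which is what makes it Huber-like. A quick standalone numpy check:

import numpy as np

x = np.array([0.1, 5.0])
print(np.log(np.cosh(x)))        # [0.00499  4.30690]
print(x ** 2 / 2)                # small-x limit:  0.005
print(np.abs(x) - np.log(2.0))   # large-x limit:  4.30685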
Project: DeepJet | Author: mstoye
def loss_logcosh_noUnc(y_true, x_pred):
    """
    This loss implements a log-cosh loss without a dummy for the uncertainty.
    It approximates a mean-squared loss for small differences and a linear one for
    large differences, and is therefore conceptually similar to the Huber loss.
    The loss is scaled such that it starts becoming linear around 4-5 sigma.
    """
    scalefactor_a = 1.
    scalefactor_b = 3.

    dxrel = (x_pred - y_true) / (y_true + 0.0001)

    def cosh(x):
        return (K.exp(x) + K.exp(-x)) / 2

    return scalefactor_a * K.mean(K.log(cosh(scalefactor_b * dxrel)), axis=-1)
Project: DeepJet | Author: mstoye
def mean_log_LaPlace_like(y_true, parameters):
    """Negative mean log-likelihood of a mixture of Laplace distributions.
    Note: the 'c' variable is obtained as a global variable.
    """
    # Note: the output size will be (c + 2) * m = 6
    c = 1  # the number of outputs we want to predict
    m = 2  # the number of distributions we want to use in the mixture
    components = K.reshape(parameters, [-1, c + 2, m])
    mu = components[:, :c, :]
    sigma = components[:, c, :]
    alpha = components[:, c + 1, :]
    alpha = K.softmax(K.clip(alpha, 1e-2, 1.))

    exponent = K.log(alpha) - float(c) * K.log(2 * sigma) \
        - K.sum(K.abs(K.expand_dims(y_true, 2) - mu), axis=1) / sigma

    log_gauss = log_sum_exp(exponent, axis=1)
    res = -K.mean(log_gauss)
    return res
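The excerpt relies on a log_sum_exp helper that is not shown. A numerically stable sketch of what such a helper conventionally looks like (an assumption, not necessarily the project's exact code; K is keras.backend as in the excerpts):

def log_sum_exp(x, axis=1):
    # log(sum(exp(x), axis)) with the per-row maximum factored out for stability
    x_max = K.max(x, axis=axis, keepdims=True)
    return K.squeeze(x_max, axis) + K.log(K.sum(K.exp(x - x_max), axis=axis))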
Project: FasterRCNN_KERAS | Author: akshaylamba
def rpn_loss_regr(num_anchors):
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.less_equal(x_abs, 1.0)
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])

    return rpn_loss_regr_fixed_num
Project: nn_playground | Author: DingKe
def _hard_sigmoid(x):
    '''Hard sigmoid different from the more conventional form (see definition of K.hard_sigmoid).

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    x = (0.5 * x) + 0.5
    return K.clip(x, 0, 1)
Project: nn_playground | Author: DingKe
def binary_sigmoid(x):
    '''Binary hard sigmoid for training binarized neural network.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    return round_through(_hard_sigmoid(x))
Project: nn_playground | Author: DingKe
def binary_tanh(x):
    '''Binary hard tanh for training a binarized neural network.
     Binarizes the neurons' activations: behaves like the sign function
     during forward propagation, and like
        hard_tanh(x) = 2 * _hard_sigmoid(x) - 1
     during back propagation, with the gradient cleared where |x| > 1.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    return 2 * round_through(_hard_sigmoid(x)) - 1
Project: nn_playground | Author: DingKe
def binarize(W, H=1):
    '''The weights' binarization function.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    # [-H, H] -> -H or H
    Wb = H * binary_tanh(W / H)
    return Wb
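binary_sigmoid, binary_tanh and binarize all call a round_through helper that is not shown in these excerpts. BinaryNet-style code conventionally implements it as a straight-through estimator; a sketch under that assumption (K is keras.backend):

def round_through(x):
    # round in the forward pass, but back-propagate as if it were the identity
    rounded = K.round(x)
    return x + K.stop_gradient(rounded - x)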
Project: nn_playground | Author: DingKe
def _mean_abs(x, axis=None, keepdims=False):
    return K.stop_gradient(K.mean(K.abs(x), axis=axis, keepdims=keepdims))
Project: nn_playground | Author: DingKe
def binary_sigmoid(x):
    '''Binary hard sigmoid for training binarized neural network.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    return round_through(_hard_sigmoid(x))
Project: nn_playground | Author: DingKe
def binary_tanh(x):
    '''Binary hard tanh for training a binarized neural network.
     Binarizes the neurons' activations: behaves like the sign function
     during forward propagation, and like
        hard_tanh(x) = 2 * _hard_sigmoid(x) - 1
     during back propagation, with the gradient cleared where |x| > 1.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    return 2 * round_through(_hard_sigmoid(x)) - 1
Project: nn_playground | Author: DingKe
def binarize(W, H=1):
    '''The weights' binarization function.

    # Reference:
    - [BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 2016](http://arxiv.org/abs/1602.02830)

    '''
    # [-H, H] -> -H or H
    Wb = H * binary_tanh(W / H)
    return Wb
Project: nn_playground | Author: DingKe
def _mean_abs(x, axis=None, keepdims=False):
    return K.stop_gradient(K.mean(K.abs(x), axis=axis, keepdims=keepdims))
Project: deeppavlov | Author: deepmipt
def cosine_dist(self, inputs):
        """Define a function for a lambda layer of a model."""

        input1, input2 = inputs
        a = K.abs(input1-input2)
        b = multiply(inputs)
        return K.concatenate([a, b])
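In context this method feeds a Lambda layer. A hedged sketch of how it is typically wired, written as a standalone function since the enclosing class is not shown (shapes are illustrative):

from keras import backend as K
from keras.layers import Input, Lambda

def match_features(inputs):
    # same computation as cosine_dist above: |a - b| concatenated with a * b
    input1, input2 = inputs
    return K.concatenate([K.abs(input1 - input2), input1 * input2])

q1 = Input(shape=(64,))   # hypothetical encoded sentence vectors
q2 = Input(shape=(64,))
features = Lambda(match_features)([q1, q2])   # shape (None, 128)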
Project: deeppavlov | Author: deepmipt
def cosine_dist(self, inputs):
        input1, input2 = inputs
        a = K.abs(input1-input2)
        b = multiply(inputs)
        return K.concatenate([a, b])
Project: AerialCrackDetection_Keras | Author: TTMRonald
def class_loss_regr(num_classes):
    def class_loss_regr_fixed_num(y_true, y_pred):
        x = y_true[:, :, 4*num_classes:] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
        return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
    return class_loss_regr_fixed_num
Project: deep-mil-for-whole-mammogram-classification | Author: wentaozhu
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss)
Project: deep-mil-for-whole-mammogram-classification | Author: wentaozhu
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss)
Project: keras-frcnn | Author: yhenon
def class_loss_regr(num_classes):
    def class_loss_regr_fixed_num(y_true, y_pred):
        x = y_true[:, :, 4*num_classes:] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
        return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
    return class_loss_regr_fixed_num
Project: latplan | Author: guicho271828
def build_error(s, height, width, base):
    P = len(setting['panels'])
    s = K.reshape(s,[-1,height,base,width,base])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,height,width,1,base,base])
    s = K.tile(s, [1,1,1,P,1,1,])

    allpanels = K.variable(np.array(setting['panels']))
    allpanels = K.reshape(allpanels, [1,1,1,P,base,base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], height, width, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        x = K.reshape(x, [-1,height,width,P, base//2, 2, base//2, 2])
        x = K.mean(x, axis=(5,7))
        return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        # return K.round(x)
        ## do nothing
        # return x

    # s         = hash(s)
    # allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4,5))
    return error
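The hash helper above reduces each 2x2 block of the error map to its rounded mean (average hashing). A standalone numpy illustration of that pool-then-round step:

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 4, 4) / 15.0
# split the 4x4 grid into 2x2 blocks, average each block, then round
blocks = x.reshape(1, 2, 2, 2, 2)             # [batch, h_block, h_in, w_block, w_in]
hashed = np.round(blocks.mean(axis=(2, 4)))   # shape (1, 2, 2)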
Project: latplan | Author: guicho271828
def build_errors(states,base,pad,dim,size):
    # address the numerical viscosity in swirling
    s = K.round(states+viscosity_adjustment)
    s = Reshape((dim+2*pad,dim+2*pad,1))(s)
    s = Cropping2D(((pad,pad),(pad,pad)))(s)
    s = K.reshape(s,[-1,size,base,size,base])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,size,size,1,base,base])
    s = K.tile   (s,[1, 1, 1, 2, 1, 1,]) # number of panels : 2

    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1,1,1,2,base,base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], size,size, 1, 1, 1])

    def hash(x):
        ## 3x3 average hashing
        x = K.reshape(x, [-1,size,size,2, base//3, 3, base//3, 3])
        x = K.mean(x, axis=(5,7))
        return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        # return K.round(x)
        ## do nothing
        # return x

    # s         = hash(s)
    # allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4,5))
    return error
Project: latplan | Author: guicho271828
def build_errors(states,base,dim,size):
    s = K.reshape(states,[-1,size,base,size,base])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,size,size,1,base,base])
    s = K.tile   (s,[1, 1, 1, 2, 1, 1,]) # number of panels : 2

    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1,1,1,2,base,base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], size,size, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        # x = K.reshape(x, [-1,size,size,2, base//2, 2, base//2, 2])
        # x = K.mean(x, axis=(5,7))
        # return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        return K.round(x)
        ## do nothing
        # return x

    # s         = hash(s)
    # allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4,5))
    return error
Project: latplan | Author: guicho271828
def build_error(s, disks, towers, tower_width, panels):
    s = K.reshape(s,[-1,disks, disk_height, towers, tower_width])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,disks,towers,1,    disk_height,tower_width])
    s = K.tile   (s,[1, 1, 1, disks+1,1, 1,])

    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1,1,1,disks+1,disk_height,tower_width])
    allpanels = K.tile(allpanels, [K.shape(s)[0], disks, towers, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing (not applicable here since the disks are only 1 pixel high)
        # x = K.reshape(x, [-1,disks,towers,disks+1, disk_height,tower_width//2,2])
        # x = K.mean(x, axis=(4,))
        # return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        return K.round(x)
        ## do nothing
        # return x

    s         = hash(s)
    allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = K.mean(error, axis=(4,5))
    return error
Project: keras-contrib | Author: farizrahman4u
def jaccard_distance(y_true, y_pred, smooth=100):
    """Jaccard distance is an intersection-over-union loss for semantic segmentation

    This loss is useful when you have unbalanced numbers of pixels within an image
    because it gives all classes equal weight. However, it is not the defacto
    standard for image segmentation.

    For example, assume you are trying to predict if each pixel is cat, dog, or background.
    You have 80% background pixels, 10% dog, and 10% cat. If the model predicts 100% background
    should it be be 80% right (as with categorical cross entropy) or 30% (with this loss)?

    The loss has been modified to have a smooth gradient as it converges on zero.
    This has been shifted so it converges on 0 and is smoothed to avoid exploding
    or disappearing gradient.

    Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
            = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))

    # References

    Csurka, Gabriela & Larlus, Diane & Perronnin, Florent. (2013).
    What is a good evaluation measure for semantic segmentation?.
    IEEE Trans. Pattern Anal. Mach. Intell.. 26. . 10.5244/C.27.32.

    https://en.wikipedia.org/wiki/Jaccard_index

    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
    jac = (intersection + smooth) / (sum_ - intersection + smooth)
    return (1 - jac) * smooth
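A standalone sanity check of the loss (illustrative values, assuming the TensorFlow backend): identical inputs give a distance of 0, and the distance grows as the prediction drifts away.

import numpy as np
from keras import backend as K

y_true = K.variable(np.array([[0., 1., 1.]]))
print(K.eval(jaccard_distance(y_true, y_true)))   # [0.]
y_bad = K.variable(np.array([[1., 0., 0.]]))
print(K.eval(jaccard_distance(y_true, y_bad)))    # [2.9126...]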
Project: auckland-ai-meetup-x-triage | Author: a-i-joe
def get_saliency(image, model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss, model.input)[0])
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input], [loss, saliency])
    outputs, saliency = fetch_saliency([image])
    K.set_learning_phase(True)
    return saliency
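A hedged usage sketch (model and img are placeholders for a trained Keras CNN and one input array of matching shape):

# hypothetical usage
saliency = get_saliency(img, model)   # shape (1, H, W): per-pixel max over channels of |d loss / d input|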
Project: auckland-ai-meetup-x-triage | Author: a-i-joe
def get_saliency(image,model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss,model.input)[0])
    saliency = K.max(grads,axis=3)
    fetch_saliency = K.function([model.input,K.learning_phase()],[loss,saliency])
    outputs, saliency = fetch_saliency([image,0])
    K.set_learning_phase(True)
    return saliency
Project: DeepLearn | Author: GauravBh1010tt
def call(self, x, mask=None):
        return K.abs(x[0] - x[1])
Project: DeepLearn | Author: GauravBh1010tt
def call(self, x, mask=None):
        inp1, inp2 = x[0], x[1]
        return K.abs(inp1 - inp2)
Project: DeepLearn | Author: GauravBh1010tt
def call(self, x, mask=None):
        return K.abs(x[0] - x[1])
Project: DeepLearn | Author: GauravBh1010tt
def call(self, x, mask=None):
        return K.abs(x[0] - x[1])
Project: kfs | Author: the-moliver
def __call__(self, x):
        regularization = 0
        if self.l1:
            regularization += self.l1 * K.sum(K.abs(K.sum(x, axis=self.axis) - 1.))
        if self.l2:
            regularization += self.l2 * K.sum(K.square(K.sum(x, axis=self.axis) - 1.))
        return regularization
Project: kfs | Author: the-moliver
def __call__(self, x):
        regularization = 0
        dimorder = self.axis + list(set(range(K.ndim(x))) - set(self.axis))
        lp = laplacian1d(K.permute_dimensions(x, dimorder))
        if self.l1:
            regularization += K.sum(self.l1 * K.abs(lp))
        if self.l2:
            regularization += K.sum(self.l2 * K.square(lp))
        return regularization
Project: kfs | Author: the-moliver
def call(self, inputs, training=None):
        def noised():
            stddev = K.stop_gradient(K.sqrt(K.clip(self.factor * K.abs(inputs),
                                                   self.epsilon, None)))
            return inputs + K.random_normal(shape=K.shape(inputs),
                                            mean=0.0,
                                            stddev=stddev)
        return K.in_train_phase(noised, inputs, training=training)
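The layer adds zero-mean Gaussian noise whose standard deviation scales with the square root of factor * |input|, so larger activations get proportionally noisier. A numpy sketch of that noise model (the factor and epsilon values are illustrative):

import numpy as np

factor, eps = 0.1, 1e-7
inputs = np.array([0.0, 1.0, 100.0])
stddev = np.sqrt(np.clip(factor * np.abs(inputs), eps, None))
noised = inputs + np.random.normal(0.0, 1.0, size=inputs.shape) * stddev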
Project: Hotpot | Author: Liang-Qiu
def mlp_ptscorer(inputs, Ddim, N, l2reg, pfx='out', Dinit='glorot_uniform', sum_mode='sum', extra_inp=[]):
    """ Element-wise features from the pair fed to an MLP. """
    linear = Activation('linear')
    if sum_mode == 'absdiff':
        absdiff = Lambda(function=lambda x: K.abs(x[0] - x[1]),
                         output_shape=lambda shape: shape[0])
        # model.add_node(name=pfx+'sum', layer=absdiff_merge(model, inputs))
        mlp_inputs = absdiff(inputs)
    elif sum_mode == 'sum':
        outsum = linear(add(inputs))
        outmul = linear(multiply(inputs))
        mlp_inputs = [outsum, outmul] + extra_inp
    else:
        raise ValueError('unsupported sum_mode: %s' % sum_mode)

    def mlp_args(mlp_inputs):
        """ return model.add_node() args that are good for mlp_inputs list
        of both length 1 and more than 1. """
        if isinstance(mlp_inputs, list):
            mlp_inputs = concatenate(mlp_inputs)
        return mlp_inputs

    # Ddim may be either 0 (no hidden layer), scalar (single hidden layer) or
    # list (multiple hidden layers)
    if Ddim == 0:
        mlp_inputs = mlp_args(mlp_inputs)
        Ddim = []
    elif not isinstance(Ddim, list):
        Ddim = [Ddim]
    if Ddim:
        for i, D in enumerate(Ddim):
            mlp_inputs = Dense(int(N*D), activation='tanh', kernel_initializer=Dinit, kernel_regularizer=l2(l2reg))(mlp_args(mlp_inputs))
            # model.add_node(name=pfx+'hdn[%d]'%(i,),
            #                layer=Dense(output_dim=int(N*D), W_regularizer=l2(l2reg), activation='tanh', init=Dinit),
            #                **mlp_args(mlp_inputs))
            # mlp_inputs = [pfx+'hdn[%d]'%(i,)]
    outmlp = Dense(1, kernel_regularizer=l2(l2reg))(mlp_inputs)
    return outmlp
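A hedged usage sketch (the embedding width of 128 and the hyperparameter values are placeholders):

from keras.layers import Input

e0 = Input(shape=(128,))   # hypothetical pair of sentence embeddings
e1 = Input(shape=(128,))
score = mlp_ptscorer([e0, e1], Ddim=1, N=128, l2reg=1e-4, sum_mode='absdiff')
# one hidden tanh layer of width int(N * Ddim) = 128 on |e0 - e1|, then a single Dense(1) score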
Project: Hotpot | Author: Liang-Qiu
def mlp_ptscorer(inputs, Ddim, N, l2reg, pfx='out', Dinit='glorot_uniform', sum_mode='sum', extra_inp=[]):
    """ Element-wise features from the pair fed to an MLP. """
    linear = Activation('linear')
    if sum_mode == 'absdiff':
        absdiff = Lambda(function=lambda x: K.abs(x[0] - x[1]),
                         output_shape=lambda shape: shape[0])
        # model.add_node(name=pfx+'sum', layer=absdiff_merge(model, inputs))
        mlp_inputs = absdiff(inputs)
    elif sum_mode == 'sum':
        outsum = linear(add(inputs))
        outmul = linear(multiply(inputs))
        mlp_inputs = [outsum, outmul] + extra_inp
    else:
        raise ValueError('unsupported sum_mode: %s' % sum_mode)

    def mlp_args(mlp_inputs):
        """ return model.add_node() args that are good for mlp_inputs list
        of both length 1 and more than 1. """
        if isinstance(mlp_inputs, list):
            mlp_inputs = concatenate(mlp_inputs)
        return mlp_inputs

    # Ddim may be either 0 (no hidden layer), scalar (single hidden layer) or
    # list (multiple hidden layers)
    if Ddim == 0:
        mlp_inputs = mlp_args(mlp_inputs)
        Ddim = []
    elif not isinstance(Ddim, list):
        Ddim = [Ddim]
    if Ddim:
        for i, D in enumerate(Ddim):
            mlp_inputs = Dense(int(N*D), activation='tanh', kernel_initializer=Dinit, kernel_regularizer=l2(l2reg))(mlp_args(mlp_inputs))
            # model.add_node(name=pfx+'hdn[%d]'%(i,),
            #                layer=Dense(output_dim=int(N*D), W_regularizer=l2(l2reg), activation='tanh', init=Dinit),
            #                **mlp_args(mlp_inputs))
            # mlp_inputs = [pfx+'hdn[%d]'%(i,)]
    outmlp = Dense(1, kernel_regularizer=l2(l2reg))(mlp_inputs)
    return outmlp
Project: nli_generation | Author: jstarc
def minimize(y_true, y_pred):
        return K.abs(K.mean(y_pred, axis=-1))
Project: single_shot_multibox_detector | Author: oarriaga
def smooth_l1(self, y_true, y_pred):
        absolute_value_loss = K.abs(y_true - y_pred) - 0.5
        square_loss = 0.5 * (y_true - y_pred)**2
        # smooth L1: quadratic where the absolute error is below 1, linear above
        absolute_value_condition = K.less(K.abs(y_true - y_pred), 1.0)
        l1_smooth_loss = tf.where(absolute_value_condition, square_loss,
                                  absolute_value_loss)
        return K.sum(l1_smooth_loss, axis=-1)
Project: Gene-prediction | Author: sriram2093
def rpn_val_loss_regr_fixed_num(y_true, y_pred):
    if K.image_dim_ordering() == 'th':
        x = y_true[:, 4 * num_anchors:, :, :] - y_pred
        x_abs = K.abs(x)
        x_bool = K.less_equal(x_abs, 1.0)
        return lambda_rpn_regr * K.sum(
            y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
    else:
        x = y_true[:, :, :, 4 * num_anchors:] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
        #return K.sum(x_abs)

        return lambda_rpn_regr * K.sum(
            y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
Project: Gene-prediction | Author: sriram2093
def class_loss_regr(num_classes):
    def class_loss_regr_fixed_num(y_true, y_pred):
        x = y_true[:, :, 4*num_classes:] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
        #return K.sum(x_abs)
        return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
    return class_loss_regr_fixed_num
Project: keras-rl | Author: matthiasplappert
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity, since TensorFlow has problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
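A standalone check of the clipping behaviour (illustrative values, assuming the TensorFlow backend):

import numpy as np
from keras import backend as K

y_true = K.variable(np.array([0.0, 0.0, 0.0]))
y_pred = K.variable(np.array([0.5, 1.0, 3.0]))
print(K.eval(huber_loss(y_true, y_pred, clip_value=1.0)))
# [0.125 0.5 2.5] -- quadratic inside the clip value, linear outside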
Project: rna_protein_binding | Author: wentaozhu
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss)