Python keras.backend module, pow() example source code

We have extracted the following 23 code examples from open-source Python projects to illustrate how to use keras.backend.pow().

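Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic call: K.pow(x, a) raises a tensor element-wise to the power a, where the exponent can be a Python scalar or a tensor of compatible shape.

from keras import backend as K

x = K.constant([1.0, 2.0, 3.0])
y = K.pow(x, 2)                              # scalar exponent  -> [1., 4., 9.]
z = K.pow(x, K.constant([3.0, 2.0, 1.0]))    # tensor exponents -> [1., 4., 3.]
print(K.eval(y), K.eval(z))
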
Project: Deep-Learning-with-Keras    Author: PacktPublishing    | Project source | File source
def call(self, x, mask=None):
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        half_n = self.n // 2
        squared = K.square(x)
        pooled = K.pool2d(squared, (half_n, half_n), strides=(1, 1),
                         padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom
Project: NTM-Keras    Author: SigmaQuan    | Project source | File source
def sharpen(_weight_t, scalar_gama_t):
    '''
    The convolution operation in convolutional shift can cause leakage or
    dispersion of weights over time if the shift weighting is not sharp.
    For example, if shifts of -1, 0 and 1 are given weights of 0.1, 0.8,
    and 0.1, the rotation will transform a weighting focused at a single
    point into one slightly blurred over three points. To combat this,
    each head emits one further scalar \gamma >= 1 whose effect is to
    sharpen the final weighting as follows:
    $$w_{i}^{(t)} = \frac{(\hat{w}_{i}^{(t)})^{\gamma}}
    {\sum_{j}(\hat{w}_{j}^{(t)})^{\gamma}}$$
    :param _weight_t: the weight vector which denotes a memory address.
    :param scalar_gama_t: the scalar used for sharpening.
    :return: the sharpened weight.
    '''
    weight_t = K.pow(_weight_t, scalar_gama_t)
    return weight_t / K.sum(weight_t)
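
A hypothetical usage sketch (not part of the NTM-Keras source), assuming the sharpen function above is in scope: raising a blurred address weighting to a power gamma >= 1 and renormalizing concentrates the weight back onto a single slot.

from keras import backend as K

w_blurred = K.constant([0.1, 0.8, 0.1])   # weighting spread over three memory slots
gamma = K.constant(3.0)                   # sharpening factor emitted by the head
w_sharp = sharpen(w_blurred, gamma)
print(K.eval(w_sharp))                    # roughly [0.002, 0.996, 0.002]
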
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x):
        s, s_hat = x

        # Compute the variables defined in the class comment
        S2 = K.sum(s)
        S1 = s_hat[0, 1]
        N = s_hat[0, 0]

        # Compute the unbiased weights
        a2 = (S1 + S2) / N / s

        # Compute the biased weights and the scaling factor t
        a1 = K.pow(a2, self.k)
        sT = K.transpose(s)
        t = K.dot(sT, a2) / K.dot(sT, a1)

        return K.stop_gradient([a1 * t])[0]
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x):
        s, s_hat = x

        # Compute the variables defined in the class comment
        S2 = K.sum(s)
        S1 = s_hat[0, 1]
        N = s_hat[0, 0]

        # Compute the unbiased weights
        a2 = (S1 + S2) / N / s

        # Compute the biased weights and the scaling factor t
        a1 = K.pow(a2, self.k)
        sT = K.transpose(s)
        t = K.dot(sT, a2) / K.dot(sT, a1)

        return K.stop_gradient([a1 * t])[0]
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x):
        s, s_hat = x

        # Compute the variables defined in the class comment
        S2 = K.sum(s)
        S1 = s_hat[0, 1]
        N = s_hat[0, 0]

        # Compute the unbiased weights
        a2 = (S1 + S2) / N / s

        # Compute the biased weights and the scaling factor t
        a1 = K.pow(a2, self.k)
        sT = K.transpose(s)
        t = K.dot(sT, a2) / K.dot(sT, a1)

        return K.stop_gradient([a1 * t])[0]
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x):
        s, s_hat = x

        # Compute the variables defined in the class comment
        S2 = K.sum(s)
        S1 = s_hat[0, 1]
        N = s_hat[0, 0]

        # Compute the unbiased weights
        a2 = (S1 + S2) / N / s

        # Compute the biased weights and the scaling factor t
        a1 = K.pow(a2, self.k)
        sT = K.transpose(s)
        t = K.dot(sT, a2) / K.dot(sT, a1)

        return K.stop_gradient([a1 * t])[0]
Project: deeplearning_keras    Author: gazzola    | Project source | File source
def call(self, x, mask=None):
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        half_n = self.n // 2
        squared = K.square(x)
        pooled = K.pool2d(squared, (half_n, half_n), strides=(1, 1),
                         padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom
Project: kapre    Author: keunwoochoi    | Project source | File source
def call(self, x):
        power_spectrogram = super(Melspectrogram, self).call(x)
        # now,  th: (batch_sample, n_ch, n_freq, n_time)
        #       tf: (batch_sample, n_freq, n_time, n_ch)
        if self.image_data_format == 'channels_first':
            power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 1, 3, 2])
        else:
            power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 3, 2, 1])
        # now, whatever image_data_format, (batch_sample, n_ch, n_time, n_freq)
        output = K.dot(power_spectrogram, self.freq2mel)
        if self.image_data_format == 'channels_first':
            output = K.permute_dimensions(output, [0, 1, 3, 2])
        else:
            output = K.permute_dimensions(output, [0, 3, 2, 1])
        if self.power_melgram != 2.0:
            output = K.pow(K.sqrt(output), self.power_melgram)
        if self.return_decibel_melgram:
            output = backend_keras.amplitude_to_decibel(output)
        return output
Project: MMdnn    Author: Microsoft    | Project source | File source
def _layer_LRN(self):
        self.add_body(0, '''
from keras.layers.core import Layer
class LRN(Layer):

    def __init__(self, size=5, alpha=0.0005, beta=0.75, k=2, **kwargs):
        self.n = size
        self.alpha = alpha
        self.beta = beta
        self.k = k
        super(LRN, self).__init__(**kwargs)

    def build(self, input_shape):
        self.shape = input_shape
        super(LRN, self).build(input_shape)

    def call(self, x, mask=None):
        half_n = self.n - 1
        squared = K.square(x)
        scale = self.k
        norm_alpha = self.alpha / (2 * half_n + 1)
        if K.image_dim_ordering() == "th":
            b, f, r, c = self.shape
            squared = K.expand_dims(squared, 0)
            squared = K.spatial_3d_padding(squared, padding=((half_n, half_n), (0, 0), (0,0)))
            squared = K.squeeze(squared, 0)
            for i in range(half_n*2+1):
                scale += norm_alpha * squared[:, i:i+f, :, :]
        else:
            b, r, c, f = self.shape
            squared = K.expand_dims(squared, -1)
            squared = K.spatial_3d_padding(squared, padding=((0, 0), (0,0), (half_n, half_n)))
            squared = K.squeeze(squared, -1)
            for i in range(half_n*2+1):
                scale += norm_alpha * squared[:, :, :, i:i+f]

        scale = K.pow(scale, self.beta)
        return x / scale

    def compute_output_shape(self, input_shape):
        return input_shape''')
Project: Deep-Learning-Plugin    Author: flowjo-lakes    | Project source | File source
def RaphyKernel(self,X,Y):
        #expand dist to a 1xnxm tensor where the 1 is broadcastable
        sQdist = K.expand_dims(squaredDistance(X,Y),0) 
        #expand scales into a px1x1 tensor so we can do an element wise exponential
        self.scales = K.expand_dims(K.expand_dims(self.scales,-1),-1)
        #expand weights into a px1x1 tensor so we can do an element wise multiplication
        self.weights = K.expand_dims(K.expand_dims(self.weights,-1),-1)
        #calculate the kernel for each scale weight on the distance matrix and sum them up
        return K.sum(self.weights*K.exp(-sQdist / (K.pow(self.scales,2))),0)

    #Calculate the MMD cost
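
The return line above is a multi-scale Gaussian (RBF) kernel, restated from the code for clarity (this note is not part of the project source):

$$k(x, y) = \sum_{p} w_{p} \exp\!\left(-\frac{\|x - y\|^{2}}{\sigma_{p}^{2}}\right)$$

evaluated for every pair of rows of X and Y via the broadcasted 1 x n x m squared-distance tensor, with one weight w_p per scale sigma_p.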
Project: kfs    Author: the-moliver    | Project source | File source
def call(self, x, mask=None):
        input_shape = K.int_shape(x)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        alpha_pos = K.reshape(self.alpha_pos, broadcast_shape)
        alpha_neg = K.reshape(self.alpha_neg, broadcast_shape)
        beta_pos = K.reshape(self.beta_pos, broadcast_shape)
        beta_neg = K.reshape(self.beta_neg, broadcast_shape)
        rho_pos = K.reshape(self.rho_pos, broadcast_shape)
        rho_neg = K.reshape(self.rho_neg, broadcast_shape)
        pos = alpha_pos * K.pow(K.relu(x + beta_pos) + K.epsilon(), rho_pos)
        neg = alpha_neg * K.pow(K.relu(-x + beta_neg) + K.epsilon(), rho_neg)
        return pos + neg
Project: kfs    Author: the-moliver    | Project source | File source
def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        alpha = K.reshape(self.alpha, broadcast_shape)
        rho = K.reshape(self.rho, broadcast_shape)

        return alpha * K.pow(K.relu(x) + K.epsilon(), rho)
Project: kfs    Author: the-moliver    | Project source | File source
def call(self, inputs, mask=None):
        if K.backend() == 'theano':
            a = K.pattern_broadcast(self.a, self.a_param_broadcast)
            k = K.pattern_broadcast(self.k, self.k_param_broadcast)
            n = K.pattern_broadcast(self.n, self.n_param_broadcast)
            z = K.pattern_broadcast(self.z, self.z_param_broadcast)
        else:
            a = self.a
            k = self.k
            n = self.n
            z = self.z
        return a / (K.pow((k / (inputs + 1e-5)), n) + z + 1e-5)
Project: kfs    Author: the-moliver    | Project source | File source
def call(self, x, mask=None):
        x = K.pow(K.relu(x) + K.epsilon(), self.rho)
        output = self.alpha + (K.dot(x, self.beta_delta) / (K.dot(x, self.gamma_eta) + 1.))
        return output
Project: Parametric-t-SNE-in-Keras    Author: zaburo-ch    | Project source | File source
def KLdivergence(P, Y):
    alpha = low_dim - 1.
    sum_Y = K.sum(K.square(Y), axis=1)
    eps = K.variable(10e-15)
    D = sum_Y + K.reshape(sum_Y, [-1, 1]) - 2 * K.dot(Y, K.transpose(Y))
    Q = K.pow(1 + D / alpha, -(alpha + 1) / 2)
    Q *= K.variable(1 - np.eye(batch_size))
    Q /= K.sum(Q)
    Q = K.maximum(Q, eps)
    C = K.log((P + eps) / (Q + eps))
    C = K.sum(P * C)
    return C
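
For reference, the Q computed above is the low-dimensional Student-t similarity of (parametric) t-SNE, restated from the code (this note is not part of the project source):

$$q_{ij} = \frac{(1 + \|y_i - y_j\|^{2}/\alpha)^{-(\alpha+1)/2}}{\sum_{k \neq l}(1 + \|y_k - y_l\|^{2}/\alpha)^{-(\alpha+1)/2}}, \qquad \alpha = \mathrm{low\_dim} - 1$$

with the diagonal masked out by the 1 - np.eye(batch_size) factor and the values clipped from below by eps for numerical stability.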
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def continuity_loss(args, x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :args.width - 1, :args.height - 1] -
                     x[:, :, 1:, :args.height - 1])
        b = K.square(x[:, :, :args.width - 1, :args.height - 1] -
                     x[:, :, :args.width - 1, 1:])
    else:
        a = K.square(x[:, :args.width - 1, :args.height-1, :] -
                     x[:, 1:, :args.height - 1, :])
        b = K.square(x[:, :args.width - 1, :args.height-1, :] -
                     x[:, :args.width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def alpha_norm(x, alpha=6, lambdaa=0.05):
    x -= K.mean(x)
    return lambdaa * K.pow(K.sum(x), alpha)
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def total_variation_norm(x):
    x -= K.mean(x)
    a = K.square(x[:, :, 1:, :-1] - x[:, :, :-1, :-1])
    b = K.square(x[:, :, :-1, 1:] - x[:, :, :-1, :-1])
    tv = K.sum(K.pow(a + b, 1.25))

    return tv
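
Both continuity_loss above and total_variation_norm compute the same total-variation-style smoothness penalty, restated from the code (this note is not part of the project source):

$$\mathrm{TV}(x) = \sum_{i,j}\left((x_{i+1,j} - x_{i,j})^{2} + (x_{i,j+1} - x_{i,j})^{2}\right)^{1.25}$$

i.e. squared horizontal and vertical differences are summed per pixel and raised to the power 1.25 before the final summation.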
Project: BatchEffectRemoval    Author: ushaham    | Project source | File source
def RaphyKernel(self,X,Y):
        #expand dist to a 1xnxm tensor where the 1 is broadcastable
        sQdist = K.expand_dims(squaredDistance(X,Y),0) 
        #expand scales into a px1x1 tensor so we can do an element wise exponential
        self.scales = K.expand_dims(K.expand_dims(self.scales,-1),-1)
        #expand weights into a px1x1 tensor so we can do an element wise multiplication
        self.weights = K.expand_dims(K.expand_dims(self.weights,-1),-1)
        #calculate the kernel for each scale weight on the distance matrix and sum them up
        return K.sum(self.weights*K.exp(-sQdist / (K.pow(self.scales,2))),0)

    #Calculate the MMD cost
Project: Super-Resolution-using-Generative-Adversarial-Networks    Author: titu1994    | Project source | File source
def __call__(self, x):
        assert K.ndim(x) == 4
        if K.image_dim_ordering() == 'th':
            a = K.square(x[:, :, :self.img_width - 1, :self.img_height - 1] - x[:, :, 1:, :self.img_height - 1])
            b = K.square(x[:, :, :self.img_width - 1, :self.img_height - 1] - x[:, :, :self.img_width - 1, 1:])
        else:
            a = K.square(x[:, :self.img_width - 1, :self.img_height - 1, :] - x[:, 1:, :self.img_height - 1, :])
            b = K.square(x[:, :self.img_width - 1, :self.img_height - 1, :] - x[:, :self.img_width - 1, 1:, :])
        loss = self.weight * K.mean(K.sum(K.pow(a + b, 1.25)))
        return loss
Project: kapre    Author: keunwoochoi    | Project source | File source
def call(self, x):
        output = self._spectrogram_mono(x[:, 0:1, :])
        if self.is_mono is False:
            for ch_idx in range(1, self.n_ch):
                output = K.concatenate((output,
                                        self._spectrogram_mono(x[:, ch_idx:ch_idx + 1, :])),
                                       axis=self.ch_axis_idx)
        if self.power_spectrogram != 2.0:
            output = K.pow(K.sqrt(output), self.power_spectrogram)
        if self.return_decibel_spectrogram:
            output = backend_keras.amplitude_to_decibel(output)
        return output
Project: wtte-rnn    Author: ragulpr    | Project source | File source
def loss_function(self, y_true, y_pred):
        def keras_split(y_true, y_pred):
            """
                Everything is a hack around the y_true,y_pred paradigm.
            """
            y, u = _keras_unstack_hack(y_true)
            a, b = _keras_unstack_hack(y_pred)

            return y, u, a, b

        def loglik_discrete(y, u, a, b, epsilon=1e-35):
            hazard0 = K.pow((y + epsilon) / a, b)
            hazard1 = K.pow((y + 1.0) / a, b)

            loglikelihoods = u * \
                K.log(K.exp(hazard1 - hazard0) - 1.0) - hazard1
            return loglikelihoods

        def loglik_continuous(y, u, a, b, epsilon=1e-35):
            ya = (y + epsilon) / a
            loglikelihoods = u * (K.log(b) + b * K.log(ya)) - K.pow(ya, b)
            return loglikelihoods

        def loglik_continuous_conditional_correction(y, u, a, b, epsilon=1e-35):
            """Integrated conditional excess loss.
                Explanation TODO
            """
            ya = (y + epsilon) / a
            loglikelihoods = y * \
                (u * (K.log(b) + b * K.log(ya)) - (b / (b + 1.)) * K.pow(ya, b))
            return loglikelihoods

        def penalty_term(b, location, growth):
            scale = growth / location
            penalty = K.exp(scale * (b - location))
            return penalty

        def accumulate_loss(loglikelihoods):
            loss = -1.0 * K.mean(loglikelihoods, axis=-1)
            return loss

        y, u, a, b = keras_split(y_true, y_pred)

        if self.kind == 'discrete':
            loglikelihoods = loglik_discrete(y, u, a, b)
        elif self.kind == 'continuous':
            loglikelihoods = loglik_continuous(y, u, a, b)

        if self.regularize:
            loglikelihoods = loglikelihoods + \
                penalty_term(b, self.location, self.growth)

        if self.reduce_loss:
            loss = accumulate_loss(loglikelihoods)
        else:
            loss = -loglikelihoods

        return loss
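
The two likelihoods above are the censored Weibull log-likelihoods used by WTTE-RNN, restated from the code (this note is not part of the wtte-rnn source). With cumulative hazard \Lambda(t) = (t/a)^{b} and censoring indicator u:

$$\log L_{\mathrm{discrete}} = u\,\log\!\left(e^{\Lambda(y+1) - \Lambda(y)} - 1\right) - \Lambda(y+1), \qquad \log L_{\mathrm{continuous}} = u\left(\log b + b \log\tfrac{y}{a}\right) - \left(\tfrac{y}{a}\right)^{b}$$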
Project: kfs    Author: the-moliver    | Project source | File source
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        f = K.variable(0)
        d = K.variable(1)
        self.weights = [self.iterations] + ms + vs + [f, d]

        cond = K.greater(t, K.variable(1))
        small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1, 1. / (self.big_K + 1))
        big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1, 1. / (self.small_k + 1))

        c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)), big_delta_t)
        f_t = c_t * f
        r_t = K.abs(f_t - f) / (K.minimum(f_t, f))
        d_t = self.beta_3 * d + (1 - self.beta_3) * r_t

        f_t = K.switch(cond, f_t, loss)
        d_t = K.switch(cond, d_t, K.variable(1.))

        self.updates.append(K.update(f, f_t))
        self.updates.append(K.update(d, d_t))

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
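
The two K.pow calls in the learning-rate line above implement Adam's standard bias correction of the step size, restated here for clarity (this note is not part of the kfs source):

$$lr_t = lr \cdot \frac{\sqrt{1 - \beta_2^{\,t}}}{1 - \beta_1^{\,t}}$$

which compensates for the zero initialization of the moment estimates m and v during the first few iterations.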