Python keras.backend module: random_uniform_variable() code examples

The following 10 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.random_uniform_variable().
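
Before the project examples, a minimal usage sketch (assuming Keras 2.x with the TensorFlow backend; the variable names are illustrative): K.random_uniform_variable allocates a backend variable and fills it with values drawn uniformly from [low, high).

from keras import backend as K

# Allocate a 3x4 backend variable with entries drawn uniformly from [0, 1).
v = K.random_uniform_variable(shape=(3, 4), low=0.0, high=1.0, seed=42)

# It behaves like any other backend variable / tensor.
print(K.int_shape(v))        # (3, 4)
print(K.eval(v).min() >= 0)  # True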

Project: Deep-Learning-Plugin    Author: flowjo-lakes
def KerasCost(self, y_true, y_pred):
        # Create a random subsampling of the target instances used during training.
        # This is rarely going to hit the last entry.
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0,
                                                           high=self.MMDTargetTrainSize - 1)), IntType)
        # Subset operation (not a very pretty way to do it).
        MMDTargetSampleTrain = K.gather(self.MMDTargetTrain, sample)
        # Do the same for the validation set ...
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0,
                                                           high=self.MMDTargetValidationSize - 1)), IntType)
        # ... and the corresponding subset operation.
        MMDTargetSampleValidation = K.gather(self.MMDTargetValidation, sample)
        # Pick the sample depending on whether we are in the training or validation phase.
        MMDtargetSample = K.in_train_phase(MMDTargetSampleTrain, MMDTargetSampleValidation)
        # Return the MMD cost for this subset.
        ret = self.cost(self.MMDLayer, MMDtargetSample)
        # y_true and y_pred have to appear in the expression, otherwise Keras complains when building the graph.
        ret = ret + 0 * K.sum(y_pred) + 0 * K.sum(y_true)
        return ret
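
The pattern above, rounding the output of random_uniform_variable, casting it to an integer type, and passing it to K.gather, amounts to drawing a random subsample of a tensor. A minimal self-contained sketch of that pattern (the pool, its size, and the 'int32' dtype standing in for IntType are all hypothetical):

import numpy as np
from keras import backend as K

pool = K.variable(np.arange(100, dtype='float32'))   # 100 target instances
# Draw 10 random indices into the pool and gather the corresponding entries.
idx = K.cast(K.round(K.random_uniform_variable(shape=(10,), low=0, high=99)), 'int32')
subset = K.gather(pool, idx)
print(K.eval(subset))                                 # 10 randomly chosen values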
Project: BatchEffectRemoval    Author: ushaham
def KerasCost(self, y_true, y_pred):
        # Create a random subsampling of the target instances used during training.
        # This is rarely going to hit the last entry.
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0,
                                                           high=self.MMDTargetTrainSize - 1)), IntType)
        # Subset operation (not a very pretty way to do it).
        MMDTargetSampleTrain = K.gather(self.MMDTargetTrain, sample)
        # Do the same for the validation set ...
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0,
                                                           high=self.MMDTargetValidationSize - 1)), IntType)
        # ... and the corresponding subset operation.
        MMDTargetSampleValidation = K.gather(self.MMDTargetValidation, sample)
        # Pick the sample depending on whether we are in the training or validation phase.
        MMDtargetSample = K.in_train_phase(MMDTargetSampleTrain, MMDTargetSampleValidation)
        # Return the MMD cost for this subset.
        ret = self.cost(self.MMDLayer, MMDtargetSample)
        # y_true and y_pred have to appear in the expression, otherwise Keras complains when building the graph.
        ret = ret + 0 * K.sum(y_pred) + 0 * K.sum(y_true)
        return ret
Project: ntee    Author: studio-ousia
def build(self, input_shape):
        # Default W to the identity matrix; a provided numpy array is wrapped in a backend variable.
        if self.W is None:
            self.W = K.variable(np.identity(input_shape[0][2]))
        elif isinstance(self.W, np.ndarray):
            self.W = K.variable(self.W)
        else:
            raise RuntimeError()

        # Default b to small uniform random values in [-0.05, 0.05].
        if self.b is None:
            self.b = K.random_uniform_variable((input_shape[0][2],), -0.05, 0.05)
        elif isinstance(self.b, np.ndarray):
            self.b = K.variable(self.b)
        else:
            raise RuntimeError()

        self.trainable_weights = [self.W, self.b]
Project: onto-lstm    Author: pdasigi
def _get_initial_sense_priors(shape, rate_range=None, name=None):
        # Returns a Keras variable initialized with values drawn uniformly from rate_range,
        # which defaults to (0.01, 0.99).
        if rate_range is None:
            low, high = 0.01, 0.99
        else:
            low, high = rate_range
        return K.random_uniform_variable(shape, low, high, name=name)
Project: deeppavlov    Author: deepmipt
def create_full_matching_layer_f(self, input_dim_a, input_dim_b):
        """Create a full-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        # Constant selector that picks out the last timestep of inp_b.
        val = np.concatenate((np.zeros((self.max_sequence_length - 1, 1)), np.ones((1, 1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0, 2, 1)))(inp_b_perm)
        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_last = Lambda(lambda x: x * W[i])(last_state)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_last = Lambda(lambda x: K.l2_normalize(x, -1))(outp_last)
            outp_last = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_last)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last, outp_a])
            outp = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_full_matching_layer_b(self, input_dim_a, input_dim_b):
        """Create a full-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        # Constant selector that picks out the first timestep of inp_b.
        val = np.concatenate((np.ones((1, 1)), np.zeros((self.max_sequence_length - 1, 1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0, 2, 1)))(inp_b_perm)
        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_last = Lambda(lambda x: x * W[i])(last_state)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_last = Lambda(lambda x: K.l2_normalize(x, -1))(outp_last)
            outp_last = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_last)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last, outp_a])
            outp = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_maxpool_matching_layer(self, input_dim_a, input_dim_b):
        """Create a maxpooling-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_b = Lambda(lambda x: x * W[i])(inp_b)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(outp_b)
            outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
            outp = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp)
            outp = Lambda(lambda x: K.max(x, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_maxatt_matching_layer(self, input_dim_a, input_dim_b):
        """Create a max-attentive-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))

        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(inp_a)
        outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(inp_b)
        outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
        # Cosine similarities between timesteps of inp_b and inp_a; the one-hot argmax keeps only the best match.
        alpha = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
        alpha = Lambda(lambda x: K.one_hot(K.argmax(x, 1), self.max_sequence_length))(alpha)
        hmax = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([alpha, outp_b])

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_hmax = Lambda(lambda x: x * W[i])(hmax)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_hmax = Lambda(lambda x: K.l2_normalize(x, -1))(outp_hmax)
            outp_hmax = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_hmax)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_hmax, outp_a])
            val = np.eye(self.max_sequence_length)
            kcon = K.constant(value=val, dtype='float32')
            outp = Lambda(lambda x: K.sum(x * kcon, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_att_matching_layer(self, input_dim_a, input_dim_b):
        """Create an attentive-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))

        w = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            w.append(wi)

        outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(inp_a)
        outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(inp_b)
        outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
        # Cosine similarities act as soft attention weights; hmean is the weighted combination of the normalized inp_b.
        alpha = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
        alpha = Lambda(lambda x: K.l2_normalize(x, 1))(alpha)
        hmean = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([alpha, outp_b])

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * w[i])(inp_a)
            outp_hmean = Lambda(lambda x: x * w[i])(hmean)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_hmean = Lambda(lambda x: K.l2_normalize(x, -1))(outp_hmean)
            outp_hmean = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_hmean)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_hmean, outp_a])
            val = np.eye(self.max_sequence_length)
            kcon = K.constant(value=val, dtype='float32')
            outp = Lambda(lambda x: K.sum(x * kcon, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: ppap    Author: unique-horn
def get_coordinates(matrix_shape, input_channels, num_filters, scale=1.0):
    """
    Return meshgrid coordinates, flattened and stacked in columns.

    Parameters
    ----------
    matrix_shape : list_like
        Shape of the output matrix
    input_channels : int
        Number of input channels
    num_filters : int
        Number of filters
    scale : float
        Range of the coordinate representation (-scale, scale)

    Returns
    -------
    coords : keras tensor
    """

    # Generate coordinate data
    x = np.arange(matrix_shape[0]) - matrix_shape[0] // 2
    y = np.arange(matrix_shape[1]) - matrix_shape[1] // 2
    c = np.arange(input_channels) - input_channels // 2
    f = np.arange(num_filters) - num_filters // 2

    # Scale each axis to roughly (-1, 1); the "+ 1" prevents division by zero for size-1 axes.
    x = x / (x.max() + 1)
    y = y / (y.max() + 1)
    c = c / (c.max() + 1)
    f = f / (f.max() + 1)

    x *= scale
    y *= scale
    c *= scale
    f *= scale
    # Build the meshgrid; the axis ordering matches the output of the generator.
    F, C, X, Y = np.meshgrid(f, c, x, y)
    R = np.sqrt((X**2) + (Y**2) + (C**2) + (F**2))

    total_items = np.prod(matrix_shape) * num_filters * input_channels

    # Flatten
    Y_r = Y.reshape(total_items)
    X_r = X.reshape(total_items)
    C_r = C.reshape(total_items)
    F_r = F.reshape(total_items)
    R_r = R.reshape(total_items)

    # Random column (currently unused, since the concatenation below is commented out).
    Rand = K.random_uniform_variable(shape=(Y_r.shape[0], 1), low=0, high=1)
    coordinates = K.variable(value=np.vstack([X_r, Y_r, C_r, F_r, R_r]).T)
    # coordinates = K.concatenate([Rand, coordinates], axis=1)
    return coordinates