Python keras.backend module: in_train_phase() code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.in_train_phase().
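For reference, K.in_train_phase(x, alt, training=None) returns x in the training phase and alt in the test phase; either branch may be given as a zero-argument callable (evaluated lazily), and the optional training argument overrides the global learning phase. Below is a minimal sketch of the usual pattern, with the placeholder shape and the 0.5 dropout rate chosen purely for illustration:

from keras import backend as K

x = K.placeholder(shape=(None, 10))

def dropped_x():
    # branch taken when the learning phase is 1 (training)
    return K.dropout(x, 0.5)

# at test time the second argument is returned unchanged
y = K.in_train_phase(dropped_x, x)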

Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project source | File source
def get_constants(self, inputs, training=None):
        constants = self.recurrent_layer.get_constants(
            inputs=inputs,
            training=training
        )

        if 0 < self.dense_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.recurrent_layer.units))

            def dropped_inputs():
                return K.dropout(ones, self.dense_dropout)
            out_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training)]
            constants.append(out_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.)])

        return constants
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | Project source | File source
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | Project source | File source
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | Project source | File source
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: nn_playground    Author: DingKe    | Project source | File source
def preprocess_input(self, inputs, training=None):
        if self.window_size > 1:
            inputs = K.temporal_padding(inputs, (self.window_size-1, 0))
        inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

        output = K.conv2d(inputs, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')

        if self.dropout is not None and 0. < self.dropout < 1.:
            z = output[:, :, :self.units]
            f = output[:, :, self.units:2 * self.units]
            o = output[:, :, 2 * self.units:]
            f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
            return K.concatenate([z, f, o], -1)
        else:
            return output
Project: Deep-Learning-Plugin    Author: flowjo-lakes    | Project source | File source
def KerasCost(self,y_true, y_pred):
        #create a random subsampling of the target instances for the test set
        #This is rarely going to hit the last entry
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0, 
                                                 high=self.MMDTargetTrainSize-1)),IntType)
        #this is a subset operation (not a very pretty way to do it)
        MMDTargetSampleTrain = K.gather(self.MMDTargetTrain,sample)
        #do the same for the validation set
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0, 
                                                 high=self.MMDTargetValidationSize-1)),IntType)
        #and the subset operation
        MMDTargetSampleValidation = K.gather(self.MMDTargetValidation,sample)
        #create the sample based on whether we are in training or validation steps
        MMDtargetSample = K.in_train_phase(MMDTargetSampleTrain, MMDTargetSampleValidation) 
        #return the MMD cost for this subset
        ret= self.cost(self.MMDLayer,MMDtargetSample)
        #pretty dumb but y_true has to be in the cost for keras to not barf when cleaning up
        ret = ret + 0*K.sum(y_pred)+0*K.sum(y_true)
        return ret
Project: NTM-Keras    Author: SigmaQuan    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: EUNN-theano    Author: iguanaus    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: BatchEffectRemoval    Author: ushaham    | Project source | File source
def KerasCost(self,y_true, y_pred):
        #create a random subsampling of the target instances for the test set
        #This is rarely going to hit the last entry
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0, 
                                                 high=self.MMDTargetTrainSize-1)),IntType)
        #this is a subset operation (not a very pretty way to do it)
        MMDTargetSampleTrain = K.gather(self.MMDTargetTrain,sample)
        #do the same for the validation set
        sample = K.cast(K.round(K.random_uniform_variable(shape=tuple([self.MMDTargetSampleSize]), low=0, 
                                                 high=self.MMDTargetValidationSize-1)),IntType)
        #and the subset operation
        MMDTargetSampleValidation = K.gather(self.MMDTargetValidation,sample)
        #create the sample based on whether we are in training or validation steps
        MMDtargetSample = K.in_train_phase(MMDTargetSampleTrain, MMDTargetSampleValidation) 
        #return the MMD cost for this subset
        ret= self.cost(self.MMDLayer,MMDtargetSample)
        #pretty dumb but y_true has to be in the cost for keras to not barf when cleaning up
        ret = ret + 0*K.sum(y_pred)+0*K.sum(y_true)
        return ret
Project: keras-prednet    Author: kunimasa-kawasaki    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: deep-models    Author: LaurentMazare    | Project source | File source
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, self.output_dim))
      B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
      constants.append(B_U)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
      input_shape = self.input_spec[0].shape
      input_dim = input_shape[-1]
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, input_dim))
      B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
      constants.append(B_W)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Project: KerasCog    Author: ABAtanasov    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.0))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.0))
        return constants
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM    | Project source | File source
def sample_h_given_x(self, x):
        """
        Draw sample from p(h|x).

        For Bernoulli RBM the conditional probability distribution can be derived to be
           p(h_j=1|x) = sigmoid(x^T W[:,j] + bh_j).
        """
        h_pre = K.dot(x, self.W) + self.bh          # pre-sigmoid (used in cross-entropy error calculation for better numerical stability)
        #h_sigm = K.sigmoid(h_pre)              # mean of Bernoulli distribution ('p', prob. of variable taking value 1), sometimes called mean-field value
        h_sigm = self.activation(self.scaling_h_given_x * h_pre)

        # drop out noise
        if(0.0 < self.p < 1.0):
            noise_shape = self._get_noise_shape(h_sigm)
            h_sigm = K.in_train_phase(K.dropout(h_sigm, self.p, noise_shape), h_sigm)

        h_samp = random_binomial(shape=h_sigm.shape, n=1, p=h_sigm)
                            # random sample
                            #   \hat{h} = 1,      if p(h=1|x) > uniform(0, 1)
                            #             0,      otherwise

        return h_samp, h_pre, h_sigm
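As an aside, the conditional quoted in the docstring above is the standard Bernoulli RBM result; a one-step sketch in LaTeX, assuming the usual energy $E(x, h) = -x^\top W h - b_x^\top x - b_h^\top h$:

\[
p(h_j = 1 \mid x)
  = \frac{\exp(x^\top W_{:,j} + b_{h_j})}{1 + \exp(x^\top W_{:,j} + b_{h_j})}
  = \sigma\bigl(x^\top W_{:,j} + b_{h_j}\bigr)
\]

which follows because the hidden units are conditionally independent given x, each contributing $h_j\,(x^\top W_{:,j} + b_{h_j})$ to the negative energy.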
Project: urnn    Author: stwisdom    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: Named-Entity-Recognition    Author: vishal1796    | Project source | File source
def call(self, X, mask=None):
        if mask is not None:
            assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'

        if self.test_mode == 'viterbi':
            test_output = self.viterbi_decoding(X, mask)
        else:
            test_output = self.get_marginal_prob(X, mask)

        self.uses_learning_phase = True
        if self.learn_mode == 'join':
            train_output = K.zeros_like(K.dot(X, self.kernel))
            out = K.in_train_phase(train_output, test_output)
        else:
            if self.test_mode == 'viterbi':
                train_output = self.get_marginal_prob(X, mask)
                out = K.in_train_phase(train_output, test_output)
            else:
                out = test_output
        return out
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: ikelos    Author: braingineer    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: ikelos    Author: braingineer    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: text_classification    Author: senochow    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(2)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(2)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(2)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(2)])
        return constants
Project: keras_bn_library    Author: bnsnapper    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.hidden_recurrent_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))

        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))

        return constants
Project: keras_bn_library    Author: bnsnapper    | Project source | File source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.input_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: keras_bn_library    Author: bnsnapper    | Project source | File source
def sample_h_given_x(self, x):
        h_pre = K.dot(x, self.Wrbm) + self.bh   
        h_sigm = self.activation(self.scaling_h_given_x * h_pre)

        # drop out noise
        #if(0.0 < self.p < 1.0):
        #   noise_shape = self._get_noise_shape(h_sigm)
        #   h_sigm = K.in_train_phase(K.dropout(h_sigm, self.p, noise_shape), h_sigm)

        if(self.hidden_unit_type == 'binary'):
            h_samp = K.random_binomial(shape=h_sigm.shape, p=h_sigm)
            # random sample
            #   \hat{h} = 1,      if p(h=1|x) > uniform(0, 1)
            #             0,      otherwise
        elif(self.hidden_unit_type == 'nrlu'):
            h_samp = nrlu(h_pre)
        else:
            h_samp = h_sigm

        if(0.0 < self.p < 1.0):
            noise_shape = self._get_noise_shape(h_samp)
            h_samp = K.in_train_phase(K.dropout(h_samp, self.p, noise_shape), h_samp)

        return h_samp, h_pre, h_sigm
Project: keras-fractalnet    Author: snf    | Project source | File source
def call(self, inputs, mask=None):
        #print("call")
        if self.force_path:
            output = self._drop_path(inputs)
        else:
            output = K.in_train_phase(self._drop_path(inputs), self._ave(inputs))
        return output
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def call(self, inputs, training=None):
        return K.in_train_phase(inputs[0],inputs[1],training=training)
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | Project source | File source
def time_distributed_dense(x, w, b=None, dropout=None,
                           input_dim=None, units=None, timesteps=None):
    """Apply `y . w + b` for every temporal slice y of x.
    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        units: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not units:
        units = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x += b
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, units]))
        x.set_shape([None, None, units])
    else:
        x = K.reshape(x, (-1, timesteps, units))
    return x
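A hypothetical usage of the helper above, with every dimension invented for illustration: project 32 sequences of 10 timesteps from 8 input features down to 4 units, sharing one dropout mask across all timesteps:

import numpy as np
from keras import backend as K

x = K.variable(np.random.rand(32, 10, 8))  # (batch, timesteps, input_dim)
w = K.variable(np.random.rand(8, 4))       # (input_dim, units)
b = K.variable(np.zeros(4))                # (units,)

y = time_distributed_dense(x, w, b, dropout=0.2,
                           input_dim=8, units=4, timesteps=10)
# y has shape (batch, timesteps, units) == (32, 10, 4)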
Project: nn_playground    Author: DingKe    | Project source | File source
def call(self, inputs, training=None):
        if 0. < self.rate < 1.:
            noise_shape = self._get_noise_shape(inputs)

            def dropped_inputs():
                return K.dropout(inputs, self.rate, noise_shape,
                                 seed=self.seed) * (1 - self.rate)
            return K.in_train_phase(dropped_inputs, inputs,
                                    training=training)
        return inputs
Project: nn_playground    Author: DingKe    | Project source | File source
def get_constants(self, inputs, training=None):
        constants = []
        if 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants

# Aliases
Project: nn_playground    Author: DingKe    | Project source | File source
def call(self, inputs, mask=None):
        if 0. < self.rate < 1.:
            noise_shape = self._get_noise_shape(inputs)
            inputs = K.in_train_phase(
                        K.dropout(inputs, self.rate, noise_shape) * (1. - self.rate),
                        inputs)  # multiplied by (1. - self.rate) for compensation
        return inputs
Project: KATE    Author: hugochan    | Project source | File source
def call(self, x):
        if self.ctype == 'ksparse':
            return K.in_train_phase(self.kSparse(x, self.topk), x)
        elif self.ctype == 'kcomp':
            return K.in_train_phase(self.k_comp_tanh(x, self.topk), x)
        else:
            warnings.warn("Unknown ctype, using no competition.")
            return x
Project: Keras-Multiplicative-LSTM    Author: titu1994    | Project source | File source
def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(5)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(5)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(5)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(5)])
        return constants
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu    | Project source | File source
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss)
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu    | Project source | File source
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss)
Project: latplan    Author: guicho271828    | Project source | File source
def call(self,logits):
        u = K.random_uniform(K.shape(logits), 0, 1)
        gumbel = - K.log(-K.log(u + 1e-20) + 1e-20)
        return K.in_train_phase(
            K.softmax( ( logits + gumbel ) / self.tau ),
            K.softmax( ( logits + gumbel ) / self.min ))
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab    | Project source | File source
def call(self, x, mask=None):
        y_pred = viterbi_decode(x, self.U, self.b_start, self.b_end, mask)
        nb_classes = self.input_spec[0].shape[2]
        y_pred_one_hot = K.one_hot(y_pred, nb_classes)
        return K.in_train_phase(x, y_pred_one_hot)
Project: NTM-Keras    Author: SigmaQuan    | Project source | File source
def get_constants(self, x):
        print("begin get_constants(self, x)")
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.controller_output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # if 0 < self.dropout_R < 1:
        #     input_shape = self.input_spec[0].shape
        #     input_dim = input_shape[-1]
        #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        #     ones = K.tile(ones, (1, int(input_dim)))
        #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
        #     constants.append(B_R)
        # else:
        #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        print("end get_constants(self, x)")
        return constants
Project: neural_network    Author: mnpappo    | Project source | File source
def call(self, x, mask=None):
        # if 0. < self.p < 1.:
        x = K.in_train_phase(randdrop(x, level=self.p), x)
        return x
Project: neural_network    Author: mnpappo    | Project source | File source
def call(self, x, mask=None):
        if 0. < self.p < 1.:
            x = K.in_train_phase(dropout_theano(x, level=self.p), x)
        return x
Project: kfs    Author: the-moliver    | Project source | File source
def call(self, inputs, training=None):
        def noised():
            stddev = K.stop_gradient(K.sqrt(K.clip(self.factor * K.abs(inputs),
                                                   self.epsilon, None)))
            return inputs + K.random_normal(shape=K.shape(inputs),
                                            mean=0.0,
                                            stddev=stddev)
        return K.in_train_phase(noised, inputs, training=training)
Project: SGAITagger    Author: zhiweiuu    | Project source | File source
def call(self, x, mask=None):
        y_pred = viterbi_decode(x, self.U, self.b_start, self.b_end, mask)
        nb_classes = self.input_spec[0].shape[2]
        y_pred_one_hot = K.one_hot(y_pred, nb_classes)
        return K.in_train_phase(x, y_pred_one_hot)
Project: rna_protein_binding    Author: wentaozhu    | Project source | File source
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss)
Project: deep-models    Author: LaurentMazare    | Project source | File source
def zoneout(self, v, prev_v, pr=0.):
    diff = v - prev_v
    diff = K.in_train_phase(K.dropout(diff, pr, noise_shape=(self.output_dim,)), diff)
    # In testing, always return v * (1-pr) + prev_v * pr
    # In training when K.dropout returns 0, return prev_v
    #             when K.dropout returns diff/(1-pr), return v
    return prev_v + diff * (1-pr)
Project: anago    Author: Hironsan    | Project source | File source
def call(self, x, mask=None):
        y_pred = viterbi_decode(x, self.U, self.b_start, self.b_end, mask)
        nb_classes = self.input_spec[0].shape[2]
        y_pred_one_hot = K.one_hot(y_pred, nb_classes)
        return K.in_train_phase(x, y_pred_one_hot)
Project: CIAN    Author: yanghanxy    | Project source | File source
def call(self, x, mask=None):
        if 0. < self.W_dropout < 1.:
            def dropped_inputs():
                return K.dropout(x, self.W_dropout)
            x_dp = K.in_train_phase(dropped_inputs, x)
        else:
            x_dp = x
        uit = dot_product(x_dp, self.W)
        if self.bias:
            uit += self.b
        uit = K.tanh(uit)

        if 0. < self.u_dropout < 1.:
            def dropped_inputs():
                return K.dropout(uit, self.u_dropout)
            u_dp = K.in_train_phase(dropped_inputs, uit)
        else:
            u_dp = uit
        ait = dot_product(u_dp, self.u)
        a = K.exp(ait)
        a = K.expand_dims(a)
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        weighted_input = x * a
        output = K.sum(weighted_input, axis=1)

        return output
Project: VASC    Author: wang-research    | Project source | File source
def call(self, inputs, training=None):
        def noised():
            return inputs * K.random_binomial(shape=K.shape(inputs),
                                              p=self.ratio
                                              )
        return K.in_train_phase(noised, inputs, training=training)
Project: R-NET-in-Keras    Author: YerevaNN    | Project source | File source
def call(self, inputs, training=None):
        if 0. < self.rate < 1.:
            symbolic_shape = K.shape(inputs)
            noise_shape = [shape if shape > 0 else symbolic_shape[axis]
                           for axis, shape in enumerate(self.noise_shape)]
            noise_shape = tuple(noise_shape)

            def dropped_inputs():
                return K.dropout(inputs, self.rate, noise_shape, seed=self.seed)

            return K.in_train_phase(dropped_inputs, inputs, training=training)

        return inputs
Project: python-alp    Author: tboquet    | Project source | File source
def return_custom():
    import keras.backend as K
    from keras.engine import Layer

    class Dropout_cust(Layer):  # pragma: no cover
        '''Applies Dropout to the input.
        '''

        def __init__(self, p, **kwargs):
            self.p = p
            if 0. < self.p < 1.:
                self.uses_learning_phase = True
            self.supports_masking = True
            super(Dropout_cust, self).__init__(**kwargs)

        def call(self, x, mask=None):
            if 0. < self.p < 1.:
                x = K.in_train_phase(K.dropout(x, level=self.p), x)
            return x

        def get_config(self):
            config = {'p': self.p}
            base_config = super(Dropout_cust, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    return Dropout_cust
Project: nonlinearIB    Author: artemyk    | Project source | File source
def call(self, x, mask=None):
        if self.test_phase_noise:
            return x+self.get_noise(x)
        else:
            return K.in_train_phase(x+self.get_noise(x), x)
Project: nonlinearIB    Author: artemyk    | Project source | File source
def __call__(self, x):
        return K.in_train_phase(self.beta * self.get_mi(x), K.variable(0.0))