Python keras.backend module: zeros() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.zeros().
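
Before the project excerpts, here is a minimal standalone sketch (ours, not taken from any of the projects below) of what keras.backend.zeros() does in the Keras 1.x/2.x backend API used throughout this page: it creates a backend variable filled with zeros, which can be read back with K.eval() and overwritten in place with K.set_value().

import numpy as np
from keras import backend as K

state = K.zeros((2, 3))              # backend variable, initialized to all zeros
print(K.eval(state))                 # [[0. 0. 0.] [0. 0. 0.]]
K.set_value(state, np.ones((2, 3)))  # overwrite the variable's value in place
print(K.eval(state))                 # [[1. 1. 1.] [1. 1. 1.]]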

Project: onto-lstm    Author: pdasigi
def build(self, input_shape):
        # input shape is (batch_size, num_words, num_senses, num_hyps)
        self.num_senses = input_shape[-2]
        self.num_hyps = input_shape[-1] - 1  # -1 because the last value is a word index
        # Sense priors are stored per word as an embedding of size 1.
        if self.set_sense_priors:
            self.sense_priors = self._get_initial_sense_priors((self.word_index_size, 1), name='{}_sense_priors'.format(self.name))
        else:
            # OntoLSTM makes sense probabilities uniform if the passed sense parameters are zero.
            self.sense_priors = K.zeros((self.word_index_size, 1))  # uniform sense probs
        # Set aside the initial weights so Embedding will not set them; it would not know what sense priors are.
        if self.initial_weights is not None:
            self.onto_aware_embedding_weights = self.initial_weights
            self.initial_weights = None
        # The following method will set self.trainable_weights
        super(OntoAwareEmbedding, self).build(input_shape)  # input_shape will not be used by Embedding's build.
        if not self.tune_embedding:
            # Move embedding to non_trainable_weights
            self._non_trainable_weights.append(self._trainable_weights.pop())

        if self.set_sense_priors:
            self._trainable_weights.append(self.sense_priors)

        if self.onto_aware_embedding_weights is not None:
            self.set_weights(self.onto_aware_embedding_weights)
Project: keras-fractalnet    Author: snf
def _drop_path(self, inputs):
        count = len(inputs)
        drops = K.switch(
            self.is_global,
            self._gen_global_path(count),
            self._gen_local_drops(count, self.p)
        )
        ave = K.zeros(shape=self.average_shape)
        for i in range(0, count):
            ave += inputs[i] * drops[i]
        drop_sum = K.sum(drops)
        # Guard against a zero sum (global drop-path can zero every path)
        # to avoid division by zero.
        ave = K.switch(
            K.not_equal(drop_sum, 0.),
            ave / drop_sum,
            ave)
        return ave
Project: yoctol-keras-layer-zoo    Author: Yoctol
def reset_states(self, states=None):
        if states is None:
            self.recurrent_layer.reset_states(states)
        else:
            self.recurrent_layer.reset_states(states[:-1])

        batch_size = self.recurrent_layer.input_spec[0].shape[0]
        if self.dense_state is None:
            self.dense_state = K.zeros((
                batch_size,
                self.dense_layer.units
            ))
        elif states is None:
            K.set_value(
                self.dense_state,
                np.zeros((batch_size, self.dense_layer.units))
            )
        else:
            K.set_value(
                self.dense_state,
                states[-1]
            )
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def vectorizeData(xContext, xQuestion, xAnswerBeing, xAnswerEnd, word_index, context_maxlen, question_maxlen):
    '''Vectorize the words to their respective index and pad context to max context length and question to max question length.
       Answers vectors are padded to the max context length as well.
    '''
    X = []
    Xq = []
    YBegin = []
    YEnd = []
    for i in range(len(xContext)):
        x = [word_index[w] for w in xContext[i]]
        xq = [word_index[w] for w in xQuestion[i]]
        # map the first and last words of answer span to one-hot representations
        y_Begin = np.zeros(len(xContext[i]))
        y_Begin[xAnswerBeing[i]] = 1
        y_End = np.zeros(len(xContext[i]))
        y_End[xAnswerEnd[i]] = 1
        X.append(x)
        Xq.append(xq)
        YBegin.append(y_Begin)
        YEnd.append(y_End)
    return pad_sequences(X, maxlen=context_maxlen, padding='post'), pad_sequences(Xq, maxlen=question_maxlen, padding='post'), pad_sequences(YBegin, maxlen=context_maxlen, padding='post'), pad_sequences(YEnd, maxlen=context_maxlen, padding='post')
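
As a quick illustration of the function above, a hypothetical toy call (the data is invented for illustration):

word_index = {'the': 1, 'cat': 2, 'sat': 3, 'where': 4}
X, Xq, YBegin, YEnd = vectorizeData([['the', 'cat', 'sat']], [['where']],
                                    [1], [2], word_index, 5, 3)
# X is [[1, 2, 3, 0, 0]] and Xq is [[4, 0, 0]] (post-padded);
# YBegin[0] and YEnd[0] are one-hot vectors marking positions 1 and 2,
# zero-padded to the context length of 5.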

# Note: download and unzip the GloVe pre-trained model files into the same folder as this script
Project: State-Frequency-Memory-stock-prediction    Author: z331565360
def get_initial_states(self, x):

        init_state_h = K.zeros_like(x)
        init_state_h = K.sum(init_state_h, axis = 1)
        reducer_s = K.zeros((self.input_dim, self.hidden_dim))
        reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
        reducer_p = K.zeros((self.hidden_dim, self.output_dim))
        init_state_h = K.dot(init_state_h, reducer_s)

        init_state_p = K.dot(init_state_h, reducer_p)

        init_state = K.zeros_like(init_state_h)
        init_freq = K.dot(init_state_h, reducer_f)

        init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
        init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))

        init_state_S_re = init_state * init_freq
        init_state_S_im = init_state * init_freq

        init_state_time = K.cast_to_floatx(0.)

        initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
        return initial_states
Project: State-Frequency-Memory-stock-prediction    Author: z331565360
def get_initial_states(self, x):

        init_state_h = K.zeros_like(x)
        init_state_h = K.sum(init_state_h, axis = 1)
        reducer_s = K.zeros((self.input_dim, self.hidden_dim))
        reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
        reducer_p = K.zeros((self.hidden_dim, self.output_dim))
        init_state_h = K.dot(init_state_h, reducer_s)

        init_state_p = K.dot(init_state_h, reducer_p)

        init_state = K.zeros_like(init_state_h)
        init_freq = K.dot(init_state_h, reducer_f)

        init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
        init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))

        init_state_S_re = init_state * init_freq
        init_state_S_im = init_state * init_freq

        init_state_time = K.cast_to_floatx(0.)

        initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
        return initial_states
Project: variants_of_rmsprop_and_adagrad    Author: mmahesh
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))


        vs = [K.zeros(K.get_variable_shape(p)) for p in params]
        self.weights = [self.iterations] + vs

        for p, g, v in zip(params, grads, vs):
            v_t = v + K.square(g)
            p_t = p - lr * g / (v_t + self.xi_2 * K.exp(-self.xi_1 * v_t))

            self.updates.append(K.update(v, v_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
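
The excerpt above shows the canonical K.zeros() pattern in legacy Keras optimizers: allocate one zero-filled accumulator variable per parameter, then emit K.update() ops against it on every step. A condensed, hypothetical sketch of just that pattern (the names params, grads, accumulators and updates are illustrative, not from the project):

# One zero-filled accumulator slot per trainable parameter.
accumulators = [K.zeros(K.get_variable_shape(p)) for p in params]
updates = [K.update(acc, acc + K.square(g))  # running sum of squared gradients
           for g, acc in zip(grads, accumulators)]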
Project: kfs    Author: the-moliver
def __init__(self, filters,
                 centers_initializer='zeros',
                 centers_regularizer=None,
                 centers_constraint=None,
                 stds_initializer='ones',
                 stds_regularizer=None,
                 stds_constraint=None,
                 gauss_scale=100,
                 **kwargs):
        self.filters = filters
        self.gauss_scale = gauss_scale
        super(GaussianReceptiveFields, self).__init__(**kwargs)
        self.centers_initializer = initializers.get(centers_initializer)
        self.stds_initializer = initializers.get(stds_initializer)
        self.centers_regularizer = regularizers.get(centers_regularizer)
        self.stds_regularizer = regularizers.get(stds_regularizer)
        self.centers_constraint = constraints.get(centers_constraint)
        self.stds_constraint = constraints.get(stds_constraint)
Project: kfs    Author: the-moliver
def __init__(self, activation='linear',
                 bias_regularizer=None,
                 bias_constraint=None,
                 bias_initializer='zeros',
                 use_bias=True, input_dim=None, **kwargs):

        self.activation = activations.get(activation)
        self.input_dim = input_dim

        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=2)]

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(EminusS, self).__init__(**kwargs)
Project: sciDT    Author: edvisees
def build(self):
        input_dim = self.input_shape[3]

        self.W = self.init((input_dim, self.output_dim))
        self.b = K.zeros((self.output_dim,))

        self.trainable_weights = [self.W, self.b]
        self.regularizers = []

        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: LSTM-GRU-CNN-MLP    Author: ansleliu
def build(self, input_shape):

        # construct the clockwork structures
        # basically: every n units the period changes;
        # `period` is for flagging this; `mask` is for enforcing it
        n = self.output_dim // len(self.period_spec)
        mask = np.zeros((self.output_dim, self.output_dim), K.floatx())
        period = np.zeros((self.output_dim,), np.int16)
        for i, t in enumerate(self.period_spec):
            mask[i*n:(i+1)*n, i*n:] = 1
            period[i*n:(i+1)*n] = t

        self.mask = K.variable(mask, name='clockwork_mask')
        self.period = K.variable(period, dtype='int16', name='clockwork_period')

        super(ClockworkRNN, self).build(input_shape)

        self.U = self.U * self.mask  # old implementation did this at run time...

        # simple rnn initializes the wrong size self.states
        # we want to also keep the time step in the state.
        if self.stateful:
            self.reset_states()
        else:
            self.states = [None, None]
Project: LSTM-GRU-CNN-MLP    Author: ansleliu
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')

        if self.go_backwards:
            initial_time = self.input_spec[0].shape[1]
        else:
            initial_time = 0.

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1], initial_time)
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)), K.variable(initial_time)]
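
Most of the reset_states() excerpts on this page repeat the same idiom: allocate the state with K.zeros() on the first call, then zero the existing variable with K.set_value() on later calls instead of reallocating it. A condensed sketch of that shared pattern (the helper name and arguments are ours, for illustration only):

import numpy as np
from keras import backend as K

def reset_first_state(layer, batch_size, state_dim):
    if hasattr(layer, 'states'):
        # Variable already exists: zero it in place.
        K.set_value(layer.states[0], np.zeros((batch_size, state_dim)))
    else:
        # First call: allocate the backend variable.
        layer.states = [K.zeros((batch_size, state_dim))]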
Project: keras-prednet    Author: kunimasa-kawasaki
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided ' +
                            '(including batch size).')

        if self.return_sequences:
            out_row, out_col, out_filter = self.output_shape[2:]
        else:
            out_row, out_col, out_filter = self.output_shape[1:]

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0],
                                  out_row, out_col, out_filter)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0],
                                  out_row, out_col, out_filter)))
        else:
            self.states = [K.zeros((input_shape[0],
                                    out_row, out_col, out_filter)),
                           K.zeros((input_shape[0],
                                    out_row, out_col, out_filter))]
Project: c2w2c    Author: milankinen
def build(self, input_shape):
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 shape=(None, input_shape[0][1], input_shape[0][2])),
                       InputSpec(dtype=K.floatx(),
                                 shape=(None, input_shape[1][1], input_shape[1][2])),
                       InputSpec(dtype=K.floatx(),
                                 shape=(None, input_shape[2][1], input_shape[2][2]))]

    self.W_h = self.init((self.nb_feature, input_shape[0][2], self.output_dim),
                         name='{}_W_h'.format(self.name))

    self.W_y = self.init((self.nb_feature, input_shape[1][2], self.output_dim),
                         name='{}_W_y'.format(self.name))

    self.W_c = self.init((self.nb_feature, input_shape[2][2], self.output_dim),
                         name='{}_W_c'.format(self.name))

    trainable = [self.W_h, self.W_y, self.W_c]

    if self.bias:
      self.b = K.zeros((self.nb_feature, self.output_dim),
                       name='{}_b'.format(self.name))
      self.trainable_weights = trainable + [self.b]
    else:
      self.trainable_weights = trainable
Project: albemarle    Author: SeanTater
def build(self, input_shape):
        input_shape = list(input_shape)
        input_shape = input_shape[:1] + [self.output_length] + input_shape[1:]
        if not self.hidden_dim:
            self.hidden_dim = input_shape[-1]
        output_dim = input_shape[-1]
        self.output_dim = self.hidden_dim
        initial_weights = self.initial_weights
        self.initial_weights = None
        super(LSTMDecoder, self).build(input_shape)
        self.output_dim = output_dim
        self.initial_weights = initial_weights
        self.W_y = self.init((self.hidden_dim, self.output_dim), name='{}_W_y'.format(self.name))
        self.b_y = K.zeros((self.output_dim,), name='{}_b_y'.format(self.name))
        self.trainable_weights += [self.W_y, self.b_y]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        input_shape.pop(1)
        self.input_spec = [InputSpec(shape=tuple(input_shape))]
Project: albemarle    Author: SeanTater
def build(self, input_shape):
        self.input_length = input_shape[1]
        if not self.input_length:
            raise Exception('AttentionDecoder requires input_length.')
        initial_weights = self.initial_weights
        self.initial_weights = None
        super(AttentionDecoder, self).build(input_shape[:1] + input_shape[2:])
        self.initial_weights = initial_weights
        dim = self.input_dim
        hdim = self.hidden_dim
        self.W_h = self.init((hdim, dim), name='{}_W_h'.format(self.name))
        self.b_h = K.zeros((dim, ), name='{}_b_h'.format(self.name))
        self.W_a = self.init((dim, 1), name='{}_W_a'.format(self.name))
        self.b_a = K.zeros((1,), name='{}_b_a'.format(self.name))
        self.trainable_weights += [self.W_a, self.b_a, self.W_h, self.b_h]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: keras-convautoencoder    Author: nanopony
def build(self):
        self.W = self.master_layer.W.T
        self.b = K.zeros((self.output_dim,))
        self.params = [self.b]
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: keras-caffenet    Author: yjn870
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)

        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta

        return X / scale
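
In this excerpt K.zeros((b, ch + 2 * half_n, r, c)) serves purely as zero padding: its leading and trailing half_n channel slices are concatenated around the squared input so that the sliding sum over n neighbouring channels is defined at the channel borders, giving the AlexNet-style local response normalization scale = (k + alpha * sum of squared neighbours) ** beta.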
Project: Implementation-CVPR2015-CNN-for-ReID    Author: Ning-Ding
def cmc(model):

    def cmc_curve(model, camera1, camera2, rank_max=50):
        num = camera1.shape[0]    
        rank = []
        score = []    
        camera_batch1 = np.zeros(camera1.shape)
        for i in range(num):
            for j in range(num):
                camera_batch1[j] = camera1[i]
            similarity_batch = model.predict_on_batch([camera_batch1, camera2])
            sim_trans = similarity_batch.transpose()
            similarity_rate_sorted = np.argsort(sim_trans[0])
            for k in range(num):
                if similarity_rate_sorted[k] == i:
                    rank.append(k+1)
                    break
        rank_val = 0
        for i in range(rank_max):
            rank_val = rank_val + len([j for j in rank if i == j-1])        
            score.append(rank_val / float(num))
        return np.array(score)  

    a, b = get_data_for_cmc()
    return cmc_curve(model, a, b)
Project: huffmax    Author: farizrahman4u
def call(self, x, mask=None):

        def get_node_w(node):
            return self.W[self.node_indices[node], :, :]

        def get_node_b(node):
            return self.b[self.node_indices[node], :]

        def compute_output(input, node=self.root_node):
            if not hasattr(node, 'left'):
                return K.zeros((K.shape(input)[0],)) + self.node_indices[node]
            else:
                node_output = K.dot(x, get_node_w(node))
                if self.bias:
                    node_output += get_node_b(node)
                left_prob = node_output[:, 0]
                right_prob = 1 - node_output[:, 0]
                left_node_output = compute_output(input, node.left)
                right_node_output = compute_output(input, node.right)
                return K.switch(left_prob > right_prob, left_node_output, right_node_output)
        return K.cast(compute_output(x), 'int32')
Project: audit-log-detection    Author: twosixlabs
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (input_shape[self.axis],)

        self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))
        self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
        self.trainable_weights = [self.gamma, self.beta]

        self.running_mean = K.zeros(shape,
                                    name='{}_running_mean'.format(self.name))
        self.running_std = K.ones(shape,
                                  name='{}_running_std'.format(self.name))
        self.non_trainable_weights = [self.running_mean, self.running_std]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True
        self.called_with = None
Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM
def contrastive_divergence_loss(self, x, dummy):
        """
        Compute contrastive divergence loss with k steps of Gibbs sampling (CD-k).

        Result is a Theano expression with the form loss = f(x).
        """

        if self.is_persistent:
            # self.persistent_chain = theano.shared(np.random.randint(0, 1, (self.batch_size, self.input_dim)).astype('f'), borrow=True)
            # self.persistent_chain = theano.shared(np.zeros((self.batch_size, self.input_dim)).astype('f'), borrow=True)
            chain_start = self.persistent_chain
        else:
            chain_start = x

        def loss(chain_start, x):
            x_rec, _, _ = self.mcmc_chain(chain_start, self.nb_gibbs_steps)
            cd = K.mean(self.free_energy(x)) - K.mean(self.free_energy(x_rec))
            return cd, x_rec

        y, x_rec = loss(chain_start, x)

        if self.is_persistent:
            self.updates = [(self.persistent_chain, x_rec)]

        return y
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, it needs to know '
                            'its batch size. Specify the batch size '
                            'of your input tensors: \n'
                            '- If using a Sequential model, '
                            'specify the batch size by passing '
                            'a `batch_input_shape` '
                            'argument to your first layer.\n'
                            '- If using the functional API, specify '
                            'the time dimension by passing a '
                            '`batch_shape` argument to your Input layer.')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, it needs to know '
                            'its batch size. Specify the batch size '
                            'of your input tensors: \n'
                            '- If using a Sequential model, '
                            'specify the batch size by passing '
                            'a `batch_input_shape` '
                            'argument to your first layer.\n'
                            '- If using the functional API, specify '
                            'the time dimension by passing a '
                            '`batch_shape` argument to your Input layer.')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: ikelos    Author: braingineer
def build(self, input_shape):

        ### construct the clockwork structures
        ### basically: every n units the period changes;
        ### `period` is for flagging this; `mask` is for enforcing it
        n = self.output_dim // len(self.period_spec)
        mask = np.zeros((self.output_dim, self.output_dim), K.floatx())
        period = np.zeros((self.output_dim,), np.int16)
        for i, t in enumerate(self.period_spec):
            mask[i*n:(i+1)*n, i*n:] = 1
            period[i*n:(i+1)*n] = t
        self.mask = K.variable(mask, name='clockwork_mask')
        self.period = K.variable(period, dtype='int16', name='clockwork_period')

        super(ClockworkRNN, self).build(input_shape)

        self.U = self.U * self.mask  ### old implementation did this at run time... 

        ### simple rnn initializes the wrong size self.states
        ### we want to also keep the time step in the state. 
        if self.stateful:
            self.reset_states()
        else:
            self.states = [None, None]
Project: ikelos    Author: braingineer
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')

        if self.go_backwards:
            initial_time = self.input_spec[0].shape[1]
        else:
            initial_time = 0.

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1], initial_time)
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)), K.variable(initial_time)]
Project: keras_bn_library    Author: bnsnapper
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape

        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.hidden_recurrent_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.input_dim)))
            K.set_value(self.states[2],
                        np.zeros((input_shape[0], self.hidden_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
                            K.zeros((input_shape[0], self.input_dim)),
                            K.zeros((input_shape[0], self.hidden_dim))]
Project: keras_bn_library    Author: bnsnapper
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        self.input_dim = input_shape[2]

        self.W = self.init((self.output_dim, 4 * self.input_dim),
                           name='{}_W'.format(self.name))
        self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
                                 name='{}_U'.format(self.name))
        self.b = K.variable(np.hstack((np.zeros(self.input_dim),
                                       K.get_value(self.forget_bias_init((self.input_dim,))),
                                       np.zeros(self.input_dim),
                                       np.zeros(self.input_dim))),
                            name='{}_b'.format(self.name))

        self.A = self.init((self.input_dim, self.output_dim),
                            name='{}_A'.format(self.name))
        self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


        self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
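
One detail worth noting in the excerpt above: the bias is assembled with np.hstack so the four LSTM gate slices live in a single vector, with the forget-gate slice initialized by forget_bias_init (conventionally ones, the standard trick that keeps the forget gate open early in training) and the remaining slices zero-filled.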
Project: keras_bn_library    Author: bnsnapper
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a '
                             '`batch_shape` argument to your Input layer.')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.input_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.input_dim)),
                            K.zeros((input_shape[0], self.output_dim))]
Project: FaceDetection    Author: youssefhb
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)

        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta

        return X / scale
Project: FaceDetection    Author: youssefhb
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)

        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta

        return X / scale
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def call(self, h, mask=None):
        print(h.get_shape(), self.hatt.get_shape())
        Ec = K.dot(h, self.W)
        Eq = K.dot(self.hatt, self.U)
        E = np.zeros((h.shape[0], self.hatt.shape[0]))
        for i in range(E.shape[0]):
            for j in range(E.shape[1]):
                E[i, j] = K.dot(K.tanh(Ec[i, :] + Eq[j, :]), self.V)
        return h
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def build(self, input_shape):
        super(AttentionLSTM, self).build(input_shape)

        if hasattr(self.attention_vec, '_keras_shape'):
            attention_dim = self.attention_vec._keras_shape[1]
        else:
            raise Exception('Layer could not be built: No information about expected input shape.')

        self.U_a = self.inner_init((self.output_dim, self.output_dim),
                                   name='{}_U_a'.format(self.name))
        self.b_a = K.zeros((self.output_dim,), name='{}_b_a'.format(self.name))

        self.U_m = self.inner_init((attention_dim, self.output_dim),
                                   name='{}_U_m'.format(self.name))
        self.b_m = K.zeros((self.output_dim,), name='{}_b_m'.format(self.name))

        if self.single_attention_param:
            self.U_s = self.inner_init((self.output_dim, 1),
                                       name='{}_U_s'.format(self.name))
            self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
        else:
            self.U_s = self.inner_init((self.output_dim, self.output_dim),
                                       name='{}_U_s'.format(self.name))
            self.b_s = K.zeros((self.output_dim,), name='{}_b_s'.format(self.name))

        self.trainable_weights += [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def build(self, input_shape):
        assert len(input_shape) >= 3
        self.input_spec = [InputSpec(shape=input_shape)]

        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True

        super(AttentionLSTMWrapper, self).build()

        if hasattr(self.attention_vec, '_keras_shape'):
            attention_dim = self.attention_vec._keras_shape[1]
        else:
            raise Exception('Layer could not be built: No information about expected input shape.')

        self.U_a = self.layer.inner_init((self.layer.output_dim, self.layer.output_dim), name='{}_U_a'.format(self.name))
        self.b_a = K.zeros((self.layer.output_dim,), name='{}_b_a'.format(self.name))

        self.U_m = self.layer.inner_init((attention_dim, self.layer.output_dim), name='{}_U_m'.format(self.name))
        self.b_m = K.zeros((self.layer.output_dim,), name='{}_b_m'.format(self.name))

        if self.single_attention_param:
            self.U_s = self.layer.inner_init((self.layer.output_dim, 1), name='{}_U_s'.format(self.name))
            self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
        else:
            self.U_s = self.layer.inner_init((self.layer.output_dim, self.layer.output_dim), name='{}_U_s'.format(self.name))
            self.b_s = K.zeros((self.layer.output_dim,), name='{}_b_s'.format(self.name))

        self.trainable_weights = [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def call(self, x, mask=None):
        # input shape: (nb_samples, time (padded with zeros), input_dim)
        # note that the .build() method of subclasses MUST define
        # self.input_spec with a complete input shape.
        input_shape = self.input_spec[0].shape
        if K._BACKEND == 'tensorflow':
            if not input_shape[1]:
                raise Exception('When using TensorFlow, you should define '
                                'explicitly the number of timesteps of '
                                'your sequences.\n'
                                'If your first layer is an Embedding, '
                                'make sure to pass it an "input_length" '
                                'argument. Otherwise, make sure '
                                'the first layer has '
                                'an "input_shape" or "batch_input_shape" '
                                'argument, including the time axis. '
                                'Found input shape at layer ' + self.name +
                                ': ' + str(input_shape))
        if self.layer.stateful:
            initial_states = self.layer.states
        else:
            initial_states = self.layer.get_initial_states(x)
        constants = self.get_constants(x)
        preprocessed_input = self.layer.preprocess_input(x)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=self.layer.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.layer.unroll,
                                             input_length=input_shape[1])
        if self.layer.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.layer.states[i], states[i]))

        if self.layer.return_sequences:
            return outputs
        else:
            return last_output
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.units)))
        else:
            self.states = [K.zeros((input_shape[0], self.units))]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.units)))
        else:
            self.states = [K.zeros((input_shape[0], self.units))]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.units)))
        else:
            self.states = [K.zeros((input_shape[0], self.units))]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def build(self, input_shape):
        super(AttentionLSTM, self).build(input_shape)

        if hasattr(self.attention_vec, '_keras_shape'):
            attention_dim = self.attention_vec._keras_shape[1]
        else:
            raise Exception('Layer could not be built: No information about expected input shape.')

        self.U_a = self.inner_init((self.output_dim, self.output_dim),
                                   name='{}_U_a'.format(self.name))
        self.b_a = K.zeros((self.output_dim,), name='{}_b_a'.format(self.name))

        self.U_m = self.inner_init((attention_dim, self.output_dim),
                                   name='{}_U_m'.format(self.name))
        self.b_m = K.zeros((self.output_dim,), name='{}_b_m'.format(self.name))

        if self.single_attention_param:
            self.U_s = self.inner_init((self.output_dim, 1),
                                       name='{}_U_s'.format(self.name))
            self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
        else:
            self.U_s = self.inner_init((self.output_dim, self.output_dim),
                                       name='{}_U_s'.format(self.name))
            self.b_s = K.zeros((self.output_dim,), name='{}_b_s'.format(self.name))

        self.trainable_weights += [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def call(self, x, mask=None):
        # input shape: (nb_samples, time (padded with zeros), input_dim)
        # note that the .build() method of subclasses MUST define
        # self.input_spec with a complete input shape.
        input_shape = self.input_spec[0].shape
        if K._BACKEND == 'tensorflow':
            if not input_shape[1]:
                raise Exception('When using TensorFlow, you should define '
                                'explicitly the number of timesteps of '
                                'your sequences.\n'
                                'If your first layer is an Embedding, '
                                'make sure to pass it an "input_length" '
                                'argument. Otherwise, make sure '
                                'the first layer has '
                                'an "input_shape" or "batch_input_shape" '
                                'argument, including the time axis. '
                                'Found input shape at layer ' + self.name +
                                ': ' + str(input_shape))
        if self.layer.stateful:
            initial_states = self.layer.states
        else:
            initial_states = self.layer.get_initial_states(x)
        constants = self.get_constants(x)
        preprocessed_input = self.layer.preprocess_input(x)

        last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                             initial_states,
                                             go_backwards=self.layer.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.layer.unroll,
                                             input_length=input_shape[1])
        if self.layer.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.layer.states[i], states[i]))

        if self.layer.return_sequences:
            return outputs
        else:
            return last_output
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: keras    Author: GeekLiB
def build(self, input_shape):
        input_dim = input_shape[2]
        _, output_length, nb_filter = self.get_output_shape_for(input_shape)

        self.W_shape = (output_length, self.filter_length * input_dim, nb_filter)
        self.W = self.init(self.W_shape, name='{}_W'.format(self.name))
        if self.bias:
            self.b = K.zeros((output_length, self.nb_filter), name='{}_b'.format(self.name))
            self.trainable_weights = [self.W, self.b]
        else:
            self.trainable_weights = [self.W]

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.b_constraint:
            self.constraints[self.b] = self.b_constraint

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True
Project: keras-contrib    Author: farizrahman4u
def test_DSSIM_channels_last():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
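
The 0.5 expectation in the "opposite" check follows from the definition DSSIM = (1 - SSIM) / 2: identical inputs have SSIM 1 (so DSSIM 0), while a constant all-zeros image built with K.zeros against a constant all-ones image built with K.ones has SSIM close to 0, giving DSSIM close to 0.5.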
Project: keras-contrib    Author: farizrahman4u
def test_DSSIM_channels_first():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_first')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [3, input_dim, input_dim]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
Project: NTM-Keras    Author: SigmaQuan
def initial(number_of_memory_locations, memory_vector_size):
    return K.zeros((number_of_memory_locations, memory_vector_size))
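
For example, a hypothetical call initial(128, 20) returns a 128 x 20 backend variable of zeros: one row per memory location, one column per element of each memory vector.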
Project: NTM-Keras    Author: SigmaQuan
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)),
                           K.zeros((input_shape[0], self.output_dim))]
Project: ppap    Author: unique-horn
def build(self, input_dim):
        self.input_channels = input_dim[1]
        self.ppn_gen = generators.HyperNetwork(output_shape=self.weight_shape,
                                               hidden_dim=self.hidden_dim,
                                               num_filters=self.nb_filters,
                                               input_channels=self.input_channels)
        self.W = self.ppn_gen.output  # PPN generator output, used as filter
        self.gen_weights = self.ppn_gen.weights  # weights of the generator
        self.gen_bias = self.ppn_gen.biases
        self.b = K.zeros((self.nb_filters,))
        self.trainable_weights = self.gen_weights + self.gen_bias + [
            self.ppn_gen.z] + [self.b]
        self.non_trainable_weights = [self.W]
        self.built = True
Project: ppap    Author: unique-horn
def build(self, input_dim):
        self.input_channels = input_dim[1]
        self.ppn_gen = generators.FFMatrixGen(output_shape=self.weight_shape,
                                              layer_sizes=self.layer_sizes,
                                              num_filters=self.nb_filters,
                                              input_channels=self.input_channels)
        self.W = self.ppn_gen.output  # PPN generator output, used as filter
        self.gen_weights = self.ppn_gen.weights # Weight of the generator
        self.gen_bias = self.ppn_gen.biases
        self.b = K.zeros((self.nb_filters,))
        self.trainable_weights = self.gen_weights + self.gen_bias + [
            self.ppn_gen.z_r]
        self.non_trainable_weights = [self.W, self.b]
        self.built = True
Project: ppap    Author: unique-horn
def build(self, input_dim):
    self.input_channels = input_dim[1]
    rows, cols = input_dim[2], input_dim[3]

    self.ppn_gen = HyperNetwork_gen(output_shape=self.weight_shape,
                                           rows=rows, cols=cols,
                                           hidden_dim=self.hidden_dim,
                                           num_filters=self.nb_filters,
                                           input_channels=self.input_channels)
    self.gen_weights = self.ppn_gen.weights  # Weight of the generator
    self.gen_bias = self.ppn_gen.biases
    self.b = K.zeros((self.nb_filters,))
    self.trainable_weights = self.gen_weights + self.gen_bias + [self.b]
    self.built = True