Python keras.backend module: concatenate() usage examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.concatenate().
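
Before the project excerpts, here is a minimal, self-contained sketch of the call itself (a generic illustration, not taken from any project below): K.concatenate joins a list of tensors along one axis, and all the other axes must already agree.

import numpy as np
from keras import backend as K

a = K.variable(np.random.random((4, 3)))
b = K.variable(np.random.random((4, 2)))

# Join along the last axis; the non-concatenated dimensions must match.
c = K.concatenate([a, b], axis=-1)
print(K.eval(c).shape)  # (4, 5)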

Project: emnlp2017-bilstm-cnn-crf | Author: UKPLab
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start
    (resp. the end boundary energy b_end) to the start (resp. end) elements
    and multiplies the result by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
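
A quick check of the unmasked branch above; this call is hypothetical and the toy shapes are illustrative only (batch of 2, 5 timesteps, 3 tags):

import numpy as np
from keras import backend as K

x = K.variable(np.zeros((2, 5, 3)))        # (batch, timesteps, num_tags)
out = add_boundary_energy(x, b_start=1.0)  # no mask: only t=0 is shifted
print(K.eval(out)[0, 0])                   # first timestep is now all 1.0
print(K.eval(out)[0, 1])                   # later timesteps stay 0.0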
Project: onto-lstm | Author: pdasigi
def call(self, x, mask=None):
        # input_shape = (batch_size, input_length, input_dim). This needs to be defined in build.
        initial_read_states = self.get_initial_states(x, mask)
        fake_writer_input = K.expand_dims(initial_read_states[0], dim=1)  # (batch_size, 1, output_dim)
        initial_write_states = self.writer.get_initial_states(fake_writer_input)  # h_0 and c_0 of the writer LSTM
        initial_states = initial_read_states + initial_write_states
        # last_output: (batch_size, output_dim)
        # all_outputs: (batch_size, input_length, output_dim)
        # last_states:
        #       last_memory_state: (batch_size, input_length, output_dim)
        #       last_output
        #       last_writer_ct
        last_output, all_outputs, last_states = self.loop(x, initial_states, mask)
        last_memory = last_states[0]
        if self.return_mode == "last_output":
            return last_output
        elif self.return_mode == "all_outputs":
            return all_outputs
        else:
            # return mode is output_and_memory
            expanded_last_output = K.expand_dims(last_output, dim=1)  # (batch_size, 1, output_dim)
            # (batch_size, 1+input_length, output_dim)
            return K.concatenate([expanded_last_output, last_memory], axis=1)
Project: yoctol-keras-layer-zoo | Author: Yoctol
def call(self, inputs, mask=None, initial_state=None, training=None):
        inputs_shape = K.shape(inputs)
        zeros = tf.zeros(
            shape=[
                inputs_shape[0],
                inputs_shape[1] - 1,
                self.layer.units
            ]
        )
        outputs = self.layer.call(
            inputs=inputs,
            mask=mask,
            initial_state=initial_state,
            training=training
        )
        outputs = K.reshape(
            tf.slice(outputs, [0, inputs_shape[1] - 1, 0], [-1, 1, -1]),
            shape=(inputs_shape[0], 1, self.layer.units)
        )
        outputs = K.concatenate([outputs, zeros], axis=1)

        if self.layer.dropout + self.layer.recurrent_dropout > 0:
            outputs._uses_learning_phase = True
        return outputs
Project: recurrent-attention-for-QA-SQUAD-based-on-keras | Author: wentaozhu
def preprocess_input(self, inputs, training=None):
        #if self.consume_less == 'cpu':
        #    input_shape = K.int_shape(x)
        #    input_dim = input_shape[2]
        #    timesteps = input_shape[1]

        #    x_z = time_distributed_dense(x, self.W_z, self.b_z, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    x_r = time_distributed_dense(x, self.W_r, self.b_r, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    return K.concatenate([x_z, x_r, x_h], axis=2)
        #else:
        #    return x
        self.ha = time_distributed_dense(self.h, self.Ua)
        return inputs
Project: recurrent-attention-for-QA-SQUAD-based-on-keras | Author: wentaozhu
def preprocess_input(self, inputs, training=None):
        #if self.consume_less == 'cpu':
        #    input_shape = K.int_shape(x)
        #    input_dim = input_shape[2]
        #    timesteps = input_shape[1]

        #    x_z = time_distributed_dense(x, self.W_z, self.b_z, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    x_r = time_distributed_dense(x, self.W_r, self.b_r, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    return K.concatenate([x_z, x_r, x_h], axis=2)
        #else:
        #    return x
        self.ha = K.dot(self.h, self.Ua) #time_distributed_dense(self.h, self.Ua)
        return inputs
Project: recurrent-attention-for-QA-SQUAD-based-on-keras | Author: wentaozhu
def preprocess_input(self, inputs, training=None):
        #if self.consume_less == 'cpu':
        #    input_shape = K.int_shape(x)
        #    input_dim = input_shape[2]
        #    timesteps = input_shape[1]

        #    x_z = time_distributed_dense(x, self.W_z, self.b_z, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    x_r = time_distributed_dense(x, self.W_r, self.b_r, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
        #                                 input_dim, self.units, timesteps)
        #    return K.concatenate([x_z, x_r, x_h], axis=2)
        #else:
        #    return x
        self.ha = K.dot(self.h, self.Ua) #time_distributed_dense(self.h, self.Ua)
        return inputs
Project: nn_playground | Author: DingKe
def preprocess_input(self, inputs, training=None):
        if self.window_size > 1:
            inputs = K.temporal_padding(inputs, (self.window_size-1, 0))
        inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

        output = K.conv2d(inputs, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')

        if self.dropout is not None and 0. < self.dropout < 1.:
            z = output[:, :, :self.units]
            f = output[:, :, self.units:2 * self.units]
            o = output[:, :, 2 * self.units:]
            f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
            return K.concatenate([z, f, o], -1)
        else:
            return output
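
The slices above carve the convolution output into three gate blocks of width self.units; as a sanity check that the split followed by K.concatenate is a round trip (toy example, assuming units = 2):

import numpy as np
from keras import backend as K

units = 2
t = K.variable(np.arange(12, dtype='float32').reshape(1, 2, 3 * units))
z, f, o = t[:, :, :units], t[:, :, units:2 * units], t[:, :, 2 * units:]
print(np.allclose(K.eval(K.concatenate([z, f, o], -1)), K.eval(t)))  # True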
Project: deeppavlov | Author: deepmipt
def create_attention_layer(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="attention_generator")
        return model
Project: deeppavlov | Author: deepmipt
def create_attention_layer_f(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_forw")
        return model
Project: deeppavlov | Author: deepmipt
def create_attention_layer_b(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.ones((1,1)), np.zeros((self.max_sequence_length-1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_back")
        return model
Project: deep-mil-for-whole-mammogram-classification | Author: wentaozhu
def call(self, x, mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification | Author: wentaozhu
def call(self, x, mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification | Author: wentaozhu
def call(self, x, mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification | Author: wentaozhu
def call(self, x, mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: keras | Author: GeekLiB
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: emnlp2017-bilstm-cnn-crf | Author: UKPLab
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
Project: NTM-Keras | Author: SigmaQuan
def preprocess_input(self, x):
        if self.consume_less == 'cpu':
            if 0 < self.dropout_W < 1:
                dropout = self.dropout_W
            else:
                dropout = 0
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[2]
            timesteps = input_shape[1]

            x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
                                         input_dim, self.output_dim, timesteps)
            x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
                                         input_dim, self.output_dim, timesteps)
            x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
                                         input_dim, self.output_dim, timesteps)
            x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
                                         input_dim, self.output_dim, timesteps)
            return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
        else:
            return x
Project: ppap | Author: unique-horn
def setup_output(self):
        """
        Setup output tensor

        """

        coordinates = get_coordinates(self.output_shape,
                                      input_channels=self.input_channels,
                                      num_filters=self.num_filters)

        num_parameters = np.prod(self.output_shape) * self.num_filters * \
                         self.input_channels
        print(num_parameters)
        # self.z_r = K.repeat_elements(self.z, rep=num_parameters, axis=0)
        self.z_r = self.init((num_parameters, 4))
        # coordinates = K.concatenate([self.z_r, coordinates], axis=1)

        output = K.tanh(K.dot(self.z_r, self.weights[0]) + self.biases[0])

        for i in range(1, len(self.weights) - 1):
            output = K.tanh(K.dot(output, self.weights[i]) + self.biases[i])
        output = K.sigmoid(K.dot(output, self.weights[-1]) + self.biases[-1])

        self.output = K.reshape(output, (self.num_filters, self.input_channels,
                                         *self.output_shape))
Project: keras-yolo | Author: BrainsGarden
def _process_input(self, x):
        """Apply logistic and softmax activations to input tensor
        """
        logistic_activate = lambda x: 1.0/(1.0 + K.exp(-x))

        (batch, w, h, channels) = x.get_shape()
        x_temp = K.permute_dimensions(x, (3, 0, 1, 2))
        x_t = []
        for i in range(self.num):
            k = self._entry_index(i, 0)
            x_t.extend([
                logistic_activate(K.gather(x_temp, (k, k + 1))), # 0
                K.gather(x_temp, (k + 2, k + 3))])
            if self.background:
                x_t.append(K.gather(x_temp, (k + 4,)))
            else:
                x_t.append(logistic_activate(K.gather(x_temp, (k + 4,))))

            x_t.append(
                softmax(
                    K.gather(x_temp, tuple(range(k + 5, k + self.coords + self.classes + 1))),
                    axis=0))
        x_t = K.concatenate(x_t, axis=0)
        return K.permute_dimensions(x_t, (1, 2, 3, 0))
Project: SGAITagger | Author: zhiweiuu
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start
    (resp. the end boundary energy b_end) to the start (resp. end) elements
    and multiplies the result by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
Project: SGAITagger | Author: zhiweiuu
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
Project: deepcpg | Author: cangermueller
def contingency_table(y, z):
    """Compute contingency table."""
    y = K.round(y)
    z = K.round(z)

    def count_matches(a, b):
        tmp = K.concatenate([a, b])
        return K.sum(K.cast(K.all(tmp, -1), K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
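
A hypothetical evaluation of contingency_table on four toy predictions, one falling in each cell of the table:

import numpy as np
from keras import backend as K

y = K.variable(np.array([[1.], [0.], [1.], [0.]]))  # labels
z = K.variable(np.array([[1.], [1.], [0.], [0.]]))  # predictions
tp, tn, fp, fn = contingency_table(y, z)
print([float(K.eval(m)) for m in (tp, tn, fp, fn)])  # [1.0, 1.0, 1.0, 1.0]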
Project: keras-customized | Author: ambrite
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: keras-customized | Author: ambrite
def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
        check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                         'squeeze', {'axis': 2},
                                         (4, 3, 1, 1))
Project: rna_protein_binding | Author: wentaozhu
def call(self, x, mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: rna_protein_binding | Author: wentaozhu
def call(self, x, mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: MSgothicPolice | Author: ysdyt
def call(self, x):

        input_shape = K.shape(x)

        len_i, len_j = input_shape[1], input_shape[2]

        outputs = []

        for nb_bins in self.nb_bins_per_level:
            bin_size_i = K.cast(len_i, 'int32') // nb_bins
            bin_size_j = K.cast(len_j, 'int32') // nb_bins

            for i, j in product(range(nb_bins), range(nb_bins)):
                # each combination of i,j is a unique rectangle
                i1, i2 = bin_size_i * i, bin_size_i * (i + 1)
                j1, j2 = bin_size_j * j, bin_size_j * (j + 1)

                pooled_features = K.max(x[:, i1:i2, j1:j2, :], axis=(1, 2))

                outputs.append(pooled_features)

        return K.concatenate(outputs, axis=1)
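
The number of pooled vectors is fixed by the bin levels, so the concatenated output width is independent of the input spatial size; a small arithmetic check (assuming nb_bins_per_level = [1, 2] and 16 input channels):

nb_bins_per_level = [1, 2]
channels = 16
n_bins = sum(b * b for b in nb_bins_per_level)  # 1 + 4 = 5 rectangles
print(n_bins * channels)                        # 80 features per image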
Project: keras-prednet | Author: kunimasa-kawasaki
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
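
The ones tensor built above is just a broadcastable carrier for the dropout mask; a minimal sketch of the underlying K.dropout behaviour (the values are random, and the 2.0 entries come from the 1/(1-p) rescaling):

import numpy as np
from keras import backend as K

ones = K.variable(np.ones((2, 4)))
mask = K.dropout(ones, level=0.5)  # zeroes ~half the entries, rescales the rest
print(K.eval(mask))                # e.g. [[0., 2., 2., 0.], [2., 0., 2., 2.]]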
Project: Keras-GAN | Author: Shaofanl
def sample(self, batch_size=128, ordered=False, **kwargs):
        if ordered:
            group_size = batch_size // len(self.dists)
            if group_size * len(self.dists) < batch_size:
                group_size += 1

            c = []
            for ordered_dist in self.dists:
                c_group = []
                for random_dist in self.dists:
                    if random_dist == ordered_dist:
                        c_group.append(ordered_dist.sample(group_size, ordered=True, **kwargs))
                    else:
                        c_group.append(random_dist.sample(group_size, ordered=False, **kwargs))
                c_group = np.concatenate(c_group, axis=1)
                c.append(c_group)
            c = np.concatenate(c, axis=0)
            c = c[:batch_size]
        else:
            c = [dist.sample(batch_size, **kwargs) for dist in self.dists]
            c = np.concatenate(c, axis=1)
        return c
Project: neuralforecast | Author: maxpumperla
def step(self, x, states):
        hidden_input = states[0]
        B_U = states[1]
        B_W = states[2]

        # Make last hidden input the residual of the prediction and
        # the last available feature.
        if self.inner_input_dim > 0:
            update = K.expand_dims(hidden_input[:, -1] - x[:, -1])
            hidden_input = K.concatenate((hidden_input[:, :-1], update))

        if self.ma_only:
            h = self.b
        else:
            h = K.dot(x * B_W, self.W) + self.b

        if self.inner_input_dim > 0:
            output = self.activation(h + K.dot(hidden_input * B_U, self.U))
            new_state = K.concatenate((hidden_input[:, 1:], output))
            return output, [new_state]
        else:
            output = self.activation(h)
            return output, [output]
Project: neuralforecast | Author: maxpumperla
def step(self, x, states):
        hidden_input = states[0]
        B_U = states[1]  # Dropout mask for U
        B_W = states[2]  # Dropout mask for W

        # Make last hidden input the residual of the prediction and
        # the last available feature.
        if self.inner_input_dim > 0:
            update = K.expand_dims(hidden_input[:, -1] - x[:, -1])
            hidden_input = K.concatenate((hidden_input[:, :-1], update))

        if self.ma_only:
            h = self.b
        else:
            h = K.dot(x * B_W, self.W) + self.b

        if self.inner_input_dim > 0:
            output = self.activation(h + K.dot(hidden_input * B_U, self.U))
            new_state = K.concatenate((hidden_input[:, 1:], output))
            return output, [new_state]
        else:
            output = self.activation(h)
            return output, [output]
Project: anago | Author: Hironsan
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    """Given the observations x, it adds the start boundary energy b_start (resp.
    end boundary energy b_end on the start (resp. end) elements and multiplies
    the mask."""
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
Project: anago | Author: Hironsan
def _forward(x, reduce_step, initial_states, U, mask=None):
    """Forward recurrence of the linear chain crf."""

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
Project: keras-caffenet | Author: yjn870
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)

        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta

        return X / scale
Project: YAD2K | Author: allanzelener
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
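
A hypothetical instantiation of the body above, assuming a 416x416 RGB input and the usual 5 anchors and 80 classes (darknet_body and the DarknetConv2D helpers come from the same YAD2K module):

from keras.layers import Input

inputs = Input(shape=(416, 416, 3))
model = yolo_body(inputs, num_anchors=5, num_classes=80)
model.summary()  # final layer has 5 * (80 + 5) = 425 output channels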
Project: keras | Author: NVIDIA
def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
        check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                         'squeeze', {'axis': 2},
                                         (4, 3, 1, 1))
Project: deep-learning-essentials | Author: DominicBreuker
def call(self, x, mask=None):
        X = x
        half_n = self.n // 2
        input_sqr = K.square(X)
        if K._BACKEND == 'theano':
            b, ch, r, c = X.shape
            extra_channels = T.alloc(0., b, ch + 2*half_n, r, c)
            input_sqr = T.set_subtensor(
                extra_channels[:, half_n:half_n+ch, :, :], input_sqr)
        elif K._BACKEND == 'tensorflow':
            b, ch, r, c = K.int_shape(X)
            up_dims = tf.pack([tf.shape(X)[0], half_n, r, c])
            up = tf.fill(up_dims, 0.0)
            middle = input_sqr
            down_dims = tf.pack([tf.shape(X)[0], half_n, r, c])
            down = tf.fill(down_dims, 0.0)
            input_sqr = K.concatenate([up, middle, down], axis=1)
        scale = self.k
        norm_alpha = self.alpha / self.n
        for i in range(self.n):
            scale += norm_alpha * input_sqr[:, i:i+ch, :, :]
        scale = scale ** self.beta
        result = X / scale
        return result
Project: keras-neural-tensor-layer | Author: dapurv5
def call(self, inputs, mask=None):
    if not isinstance(inputs, list) or len(inputs) <= 1:
      raise Exception('BilinearTensorLayer must be called on a list of tensors '
                      '(at least 2). Got: ' + str(inputs))
    e1 = inputs[0]
    e2 = inputs[1]
    batch_size = K.shape(e1)[0]
    k = self.output_dim
    # print([e1,e2])
    feed_forward_product = K.dot(K.concatenate([e1,e2]), self.V)
    # print(feed_forward_product)
    bilinear_tensor_products = [ K.sum((e2 * K.dot(e1, self.W[0])) + self.b, axis=1) ]
    # print(bilinear_tensor_products)
    for i in range(1, k):
      btp = K.sum((e2 * K.dot(e1, self.W[i])) + self.b, axis=1)
      bilinear_tensor_products.append(btp)
    result = K.tanh(K.reshape(K.concatenate(bilinear_tensor_products, axis=0), (batch_size, k)) + feed_forward_product)
    # print(result)
    return result
Project: deep-learning-experiments | Author: raghakot
def build(self, input_shape):
        """ Create trainable weights for this connection
        """
        # Number of trainable weights = sum of all filters in `input_shape`
        nb_filters = sum([s[3] for s in input_shape])

        # Create shared weights for all filters within an input layer.
        if self.shared_weights:
            weights = []
            for shape in input_shape:
                # Create shared weight, make nb_filter number of clones.
                shared_w = K.variable(np.ones(1) * self.init_value)
                for _ in range(shape[3]):
                    weights.append(shared_w)
            self.W = K.concatenate(weights)
        else:
            self.W = K.variable(np.ones(shape=nb_filters) * self.init_value)

        self._trainable_weights.append(self.W)
        super(Connection, self).build(input_shape)
Project: dem | Author: hengyuan-hu
def deep_encoder1(input_shape):
    input_img = Input(shape=input_shape)
    print(input_img._keras_shape)

    x = Conv2D(32, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(input_img)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 16, 16
    x = Conv2D(64, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 8, 8
    x = Conv2D(128, 3, 3, activation='relu', border_mode='same', subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 4, 4
    latent_dim = (1, 1, 1024)
    z_mean = Conv2D(1024, 4, 4, activation='linear',
                    border_mode='same', subsample=(4, 4))(x)
    z_log_var = Conv2D(1024, 4, 4, activation='linear',
                       border_mode='same', subsample=(4, 4))(x)
    encoded = K.concatenate([z_mean, z_log_var], axis=1)
    # print 'encoded_shape', encoded.get_shape().as_list()
    return Model(input_img, [z_mean, z_log_var])
Project: New_Layers-Keras-Tensorflow | Author: WeidiXie
def call(self, x, mask=None):
        input_, flow_layer_ = x
        stride_row, stride_col = self.subsample
        shape = input_._keras_shape
        output_row = shape[1] - self.kernel_size + 1
        output_col = shape[2] - self.kernel_size + 1
        xs = []
        ws = []
        for i in range(output_row):
            for j in range(output_col):
                slice_row = slice(i * stride_row,
                                  i * stride_row + self.kernel_size)
                slice_col = slice(j * stride_col,
                                  j * stride_col + self.kernel_size)
                xs.append(K.reshape(input_[:, slice_row, slice_col, :],
                                    (1, -1, self.kernel_size ** 2, shape[-1])))
                ws.append(K.reshape(flow_layer_[:, i, j, :], (1, -1, self.kernel_size ** 2, 1)))
        x_aggregate = K.concatenate(xs, axis=0)
        x_aggregate = K.permute_dimensions(x_aggregate, (0, 1, 3, 2))
        W = K.concatenate(ws, axis=0)
        output = K.batch_dot(x_aggregate, W)
        output = K.reshape(output, (output_row, output_col, -1, shape[3]))
        output = K.permute_dimensions(output, (2, 0, 1, 3))
        output = self.activation(output)
        return output
Project: InnerOuterRNN | Author: Chemoinformatics
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: ikelos | Author: braingineer
def preprocess_input(self, x):
        if self.consume_less == 'cpu':
            if 0 < self.dropout_W < 1:
                dropout = self.dropout_W
            else:
                dropout = 0
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[2]
            timesteps = input_shape[1]

            x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
                                         input_dim, self.output_dim, timesteps)
            x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
                                         input_dim, self.output_dim, timesteps)
            x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
                                         input_dim, self.output_dim, timesteps)
            x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
                                         input_dim, self.output_dim, timesteps)
            return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
        else:
            return x
Project: ikelos | Author: braingineer
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: ikelos | Author: braingineer
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: R-NET-in-Keras | Author: YerevaNN
def step(self, inputs, states):
        vP_t = inputs
        hP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks 
        vP, WP_v, WPP_v, v, W_g2 = states[3:8]
        vP_mask, = states[8:]

        WP_v_Dot = K.dot(vP, WP_v)
        WPP_v_Dot = K.dot(K.expand_dims(vP_t, axis=1), WPP_v)

        s_t_hat = K.tanh(WPP_v_Dot + WP_v_Dot)
        s_t = K.dot(s_t_hat, v)
        s_t = K.batch_flatten(s_t)

        a_t = softmax(s_t, mask=vP_mask, axis=1)

        c_t = K.batch_dot(a_t, vP, axes=[1, 1])

        GRU_inputs = K.concatenate([vP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g2))
        GRU_inputs = g * GRU_inputs

        hP_t, s = super(SelfAttnGRU, self).step(GRU_inputs, states)

        return hP_t, s
Project: R-NET-in-Keras | Author: YerevaNN
def step(self, inputs, states):
        uP_t = inputs
        vP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks
        uQ, WQ_u, WP_v, WP_u, v, W_g1 = states[3:9]
        uQ_mask, = states[9:10]

        WQ_u_Dot = K.dot(uQ, WQ_u) #WQ_u
        WP_v_Dot = K.dot(K.expand_dims(vP_tm1, axis=1), WP_v) #WP_v
        WP_u_Dot = K.dot(K.expand_dims(uP_t, axis=1), WP_u) # WP_u

        s_t_hat = K.tanh(WQ_u_Dot + WP_v_Dot + WP_u_Dot)

        s_t = K.dot(s_t_hat, v) # v
        s_t = K.batch_flatten(s_t)
        a_t = softmax(s_t, mask=uQ_mask, axis=1)
        c_t = K.batch_dot(a_t, uQ, axes=[1, 1])

        GRU_inputs = K.concatenate([uP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g1))  # W_g1
        GRU_inputs = g * GRU_inputs
        vP_t, s = super(QuestionAttnGRU, self).step(GRU_inputs, states)

        return vP_t, s
Project: PhasedLSTM-Keras | Author: fferroni
def preprocess_input(self, inputs, training=None):
        if self.implementation == 0:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[2]
            timesteps = input_shape[1]

            x_i = _time_distributed_dense(inputs, self.kernel_i, self.bias_i,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_f = _time_distributed_dense(inputs, self.kernel_f, self.bias_f,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_c = _time_distributed_dense(inputs, self.kernel_c, self.bias_c,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_o = _time_distributed_dense(inputs, self.kernel_o, self.bias_o,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
        else:
            return inputs
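
When self.implementation == 0, the four projections are computed once over the whole sequence and concatenated, so each recurrent step only has to slice out its gate blocks; a toy sketch of that slicing (assuming units = 3):

import numpy as np
from keras import backend as K

units = 3
z = K.variable(np.random.random((2, 5, 4 * units)))  # precomputed projections
z_i = z[:, :, :units]                # input-gate block
z_f = z[:, :, units:2 * units]       # forget-gate block
z_c = z[:, :, 2 * units:3 * units]   # candidate block
z_o = z[:, :, 3 * units:]            # output-gate block
print(K.int_shape(z_i))              # (2, 5, 3)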
Project: FaceDetection | Author: youssefhb
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)

        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta

        return X / scale
Project: FaceDetection | Author: youssefhb
def get_output(self, train):
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)

        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        scale = self.k

        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta

        return X / scale