Python keras.backend module: tile() example source code

We extracted the following 47 code examples from open-source Python projects to illustrate how to use keras.backend.tile().
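
For reference, K.tile(x, n) repeats a tensor n[i] times along axis i, mirroring numpy.tile. A minimal sketch of the core behavior (assuming the TensorFlow backend; shapes chosen for illustration):

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1., 2.]]))   # shape (1, 2)
tiled = K.tile(x, (3, 1))              # repeat 3 times along axis 0 -> shape (3, 2)
print(K.eval(tiled))                   # [[1. 2.], [1. 2.], [1. 2.]]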

Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def get_constants(self, inputs, training=None):
        constants = self.recurrent_layer.get_constants(
            inputs=inputs,
            training=training
        )

        if 0 < self.dense_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.recurrent_layer.units))

            def dropped_inputs():
                return K.dropout(ones, self.dense_dropout)
            out_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training)]
            constants.append(out_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.)])

        return constants
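
The ones_like -> tile -> in_train_phase chain above is a pattern that recurs throughout these examples: a column of ones of shape (samples, 1) is tiled out to (samples, units) and dropout is applied to it only at training time, producing a dropout mask that is sampled once per sequence rather than once per timestep. A standalone sketch of the pattern (the shapes and rate here are illustrative, not taken from the project):

from keras import backend as K

inputs = K.placeholder(shape=(None, 10, 8))               # (samples, timesteps, input_dim)
units = 16
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))   # (samples, 1)
ones = K.tile(ones, (1, units))                           # (samples, units)
mask = K.in_train_phase(lambda: K.dropout(ones, 0.5), ones)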
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | project source | file source
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: latplan    Author: guicho271828    | project source | file source
def tensor_swirl(image, center=None, strength=1, radius=100, rotation=0, cval=0.0, **kwargs):
    # **kwargs is for unsupported options (ignored)
    cval = tf.fill(K.shape(image)[0:1], cval)
    shape = K.int_shape(image)[1:3]
    if center is None:
        center = np.array(shape) / 2
    ys = np.expand_dims(np.repeat(np.arange(shape[0]), shape[1]),-1)
    xs = np.expand_dims(np.tile  (np.arange(shape[1]), shape[0]),-1)
    map_xs, map_ys = swirl_mapping(xs, ys, center, rotation, strength, radius)

    mapping = np.zeros((*shape, *shape))
    for map_x, map_y, x, y in zip(map_xs, map_ys, xs, ys):
        results = tensor_linear_interpolation(image, map_x, map_y, cval)
        for _y, _x, w in results:
            # mapping[int(y),int(x),int(_y),int(_x),] = w
            mapping[int(_y),int(_x),int(y),int(x),] = w


    results = tf.tensordot(image, K.variable(mapping), [[1,2],[0,1]])
    # results = K.reshape(results, K.shape(image))
    return results
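
The ys/xs arrays above use the standard repeat/tile idiom for flattened coordinate grids: np.repeat makes the row index vary slowly while np.tile makes the column index vary quickly, so together they enumerate every (y, x) pair in row-major order. A small worked example (a 2x3 grid, chosen only for illustration):

import numpy as np

h, w = 2, 3
ys = np.expand_dims(np.repeat(np.arange(h), w), -1)  # [[0],[0],[0],[1],[1],[1]]
xs = np.expand_dims(np.tile(np.arange(w), h), -1)    # [[0],[1],[2],[0],[1],[2]]
print(np.hstack([ys, xs]))                           # all (y, x) pairs, row-major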
Project: NTM-Keras    Author: SigmaQuan    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: Asynchronous-RL-agent    Author: Fritz449    | project source | file source
def create_conv_model(self):
        # This is where the neural network model is initialized
        init = 'glorot_uniform'
        self.state_in = Input(self.state_dim)
        self.l1 = Convolution2D(32, 8, 8, activation='elu', init=init, subsample=(4, 4), border_mode='same')(
            self.state_in)
        self.l2 = Convolution2D(64, 4, 4, activation='elu', init=init, subsample=(2, 2), border_mode='same')(
            self.l1)
        # self.l3 = Convolution2D(64, 3, 3, activation='relu', init=init, subsample=(1, 1), border_mode='same')(
        #     self.l2)
        self.l3 = self.l2
        self.h = Flatten()(self.l3)
        self.hidden = Dense(256, init=init, activation='elu')(self.h)
        self.value = Dense(1, init=init)(self.hidden)
        self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)
        self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                             Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                    axis=[1], keepdims=True), (1, self.action_dim)))
        self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
        self.model = Model(self.state_in, output=[self.policy, self.value])
Project: Asynchronous-RL-agent    Author: Fritz449    | project source | file source
def create_fc_model(self):
        # This is where the neural network model is initialized
        init = 'glorot_uniform'
        self.state_in = Input(self.state_dim)
        self.hidden = Dense(256, init=init, activation='elu')(self.state_in)
        self.value = Dense(1)(self.hidden)
        self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)

        self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                             Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                    axis=[1], keepdims=True), (1, self.action_dim)))
        # print (type(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
        #                                                 axis=[1], keepdims=True)))
        # print(Theano.function([self.state_in], [Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
        #                                                 axis=[1], keepdims=True)])([np.zeros((32,) + self.state_dim)])[0].shape)
        # 1/0
        self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
        self.model = Model(self.state_in, output=[self.policy, self.value])
Project: EUNN-theano    Author: iguanaus    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: devise-keras    Author: priyamtejaswin    | project source | file source
def custom_for_keras(self, ALL_word_embeds):
        ## only the top 20 rows from word_vectors are legit!
        def top_accuracy(true_word_indices, image_vectors):
            l2 = lambda x, axis: K.sqrt(K.sum(K.square(x), axis=axis, keepdims=True))
            l2norm = lambda x, axis: x/l2(x, axis)

            l2_words = l2norm(ALL_word_embeds, axis=1)
            l2_images = l2norm(image_vectors, axis=1)

            tiled_words = K.tile(K.expand_dims(l2_words, axis=1) , (1, 200, 1))
            tiled_images = K.tile(K.expand_dims(l2_images, axis=1), (1, 20, 1))

            diff = K.squeeze(l2(l2_words - l2_images, axis=2), axis=2)

            # slice_top3 = lambda x: x[:, 0:3]
            # slice_top1 = lambda x: x[:, 0:1]

            diff_top5 = metrics.top_k_categorical_accuracy(tiled_images, diff)
            return diff_top5

        return top_accuracy
Project: deep-models    Author: LaurentMazare    | project source | file source
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, self.output_dim))
      B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
      constants.append(B_U)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
      input_shape = self.input_spec[0].shape
      input_dim = input_shape[-1]
      ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
      ones = K.tile(ones, (1, input_dim))
      B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
      constants.append(B_W)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Project: KerasCog    Author: ABAtanasov    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.0))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.0))
        return constants
Project: urnn    Author: stwisdom    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: extkeras    Author: andhus    | project source | file source
def get_attention_initial_state(self, inputs):
        """Creates initial state for attention mechanism. By default the
        attention representation `attention_h` computed by attention_step is
        passed as attention state between timesteps.

        Extending attention implementations that require additional states
        must override this method accordingly.

        # Arguments
            inputs: layer inputs

        # Returns
            list (length one) of initial state (zeros)
        """
        # build an all-zero tensor of shape (samples, output_dim)
        initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_state = K.tile(initial_state, [1, self.attention_output_dim])  # (samples, output_dim)
        return [initial_state]
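
The zeros_like -> sum -> expand_dims -> tile chain is the usual backend-agnostic trick for building an all-zero state of shape (samples, dim) when the batch size is unknown at graph-construction time: summing the zero tensor over its non-batch axes collapses it to (samples,) without ever needing the concrete batch size. A minimal sketch outside any layer (dim is arbitrary here):

from keras import backend as K

inputs = K.placeholder(shape=(None, 5, 7))   # (samples, timesteps, input_dim)
dim = 32
state = K.zeros_like(inputs)                 # (samples, timesteps, input_dim)
state = K.sum(state, axis=(1, 2))            # (samples,)
state = K.expand_dims(state)                 # (samples, 1)
state = K.tile(state, [1, dim])              # (samples, dim), all zeros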
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: text_classification    Author: senochow    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(2)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(2)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(2)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(2)])
        return constants
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.hidden_recurrent_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))

        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))

        return constants
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.input_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: value_gradient    Author: rarilurelo    | project source | file source
def optimize_pi(self, batch):
        if not self.built:
            self.build()
        sampled_action_for_M = self.sess.run(self.sampled_action_for_M, {self.states: batch['states']})
        sampled_action = np.transpose(sampled_action_for_M, (1, 0, 2))[:, :, np.newaxis, :]
        pairwise_d = np.sum((np.tile(sampled_action, (self.M_pi, 1)) - \
            np.transpose(np.tile(sampled_action, (self.M_pi, 1)), (0, 2, 1, 3)))**2, axis=3).reshape(sampled_action.shape[0], -1)
        d = np.median(pairwise_d, axis=1)
        h = d/(2*np.log(self.M_pi+1))
        feed_in = {
                self.states: batch['states'],
                self.actions: batch['actions'],
                self.sampled_action_feeder: sampled_action_for_M,
                self.h: h,
                }
        self.sess.run(self.pi_updater, feed_in)
Project: onto-lstm    Author: pdasigi    | project source | file source
def update_memory(self, z_t, h_t, mem_tm1):
        '''
        This method takes the attention vector (z_t), writer output (h_t) and previous timestep's memory (mem_tm1)
        and updates the memory. Implements equations 6, 14 or 15.
        '''
        tiled_z_t = K.tile(K.expand_dims(z_t), (self.output_dim))  # (batch_size, input_length, output_dim)
        input_length = K.shape(mem_tm1)[1]
        # (batch_size, input_length, output_dim)
        tiled_h_t = K.permute_dimensions(K.tile(K.expand_dims(h_t), (input_length)), (0, 2, 1))
        # Updating memory. First term in summation corresponds to selective forgetting and the second term to
        # selective addition. Equation 6.
        mem_t = mem_tm1 * (1 - tiled_z_t) + tiled_h_t * tiled_z_t  # (batch_size, input_length, output_dim)
        return mem_t
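
Numerically the update is a per-slot convex blend: wherever the attention z_t is near 1 the old memory is forgotten and the writer output h_t is written in, and wherever it is near 0 the slot is left untouched. A toy check with concrete numbers (batch axis dropped for brevity):

import numpy as np

mem_tm1 = np.array([[1., 2.], [3., 4.]])  # (input_length=2, output_dim=2)
z_t = np.array([[0.], [1.]])              # attention weight per slot, broadcast over output_dim
h_t = np.array([9., 9.])                  # writer output, broadcast over slots
mem_t = mem_tm1 * (1 - z_t) + h_t * z_t
print(mem_t)                              # [[1. 2.], [9. 9.]]: slot 0 kept, slot 1 overwritten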
Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def get_initial_state(self, inputs):
        dense_initial_state = K.zeros_like(inputs)
        dense_initial_state = K.sum(dense_initial_state, axis=(1, 2))
        dense_initial_state = K.expand_dims(dense_initial_state)
        dense_initial_state = K.tile(dense_initial_state, [1, self.dense_layer.units])
        return [dense_initial_state] + self.recurrent_layer.get_initial_state(inputs)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def compute_mask(self, inputs, mask):
        output_mask = self.layer.compute_mask(
            inputs=inputs,
            mask=mask,
        )

        if self.time_steps is None:
            return output_mask
        else:
            output_mask = K.ones_like(output_mask)
            output_mask = K.any(output_mask, axis=1, keepdims=True)
            return K.tile(output_mask, [1, self.time_steps])
Project: nn_playground    Author: DingKe    | project source | file source
def get_initial_states(self, inputs):
        # build an all-zero tensor of shape (samples, units)
        initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_state = K.tile(initial_state, [1, self.units])  # (samples, units)
        initial_states = [initial_state for _ in range(len(self.states))]
        return initial_states
项目:nn_playground    作者:DingKe    | 项目源码 | 文件源码
def get_constants(self, inputs, training=None):
        constants = []
        if 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = K.in_train_phase(dropped_inputs,
                                       ones,
                                       training=training)
            constants.append(dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = K.in_train_phase(dropped_inputs,
                                           ones,
                                           training=training)
            constants.append(rec_dp_mask)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants

# Aliases
Project: Keras-Multiplicative-LSTM    Author: titu1994    | project source | file source
def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation != 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(5)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(5)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(5)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(5)])
        return constants
Project: latplan    Author: guicho271828    | project source | file source
def build_error(s, height, width, base):
    P = len(setting['panels'])
    s = K.reshape(s,[-1,height,base,width,base])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,height,width,1,base,base])
    s = K.tile(s, [1,1,1,P,1,1,])

    allpanels = K.variable(np.array(setting['panels']))
    allpanels = K.reshape(allpanels, [1,1,1,P,base,base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], height, width, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        x = K.reshape(x, [-1,height,width,P, base//2, 2, base//2, 2])
        x = K.mean(x, axis=(5,7))
        return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        # return K.round(x)
        ## do nothing
        # return x

    # s         = hash(s)
    # allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4,5))
    return error
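
The reshape -> permute_dimensions -> reshape prefix splits each image into a height x width grid of base x base cells, so that after tiling every cell can be compared against every candidate panel at once. The same decomposition in NumPy (a 4x4 image split into 2x2 cells of base 2; sizes chosen for illustration):

import numpy as np

height = width = base = 2
img = np.arange(16).reshape(1, height * base, width * base)
s = img.reshape(-1, height, base, width, base)
s = s.transpose(0, 1, 3, 2, 4)                # K.permute_dimensions(s, [0,1,3,2,4])
s = s.reshape(-1, height, width, base, base)
print(s[0, 0, 0])                             # top-left cell: [[0 1], [4 5]]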
Project: latplan    Author: guicho271828    | project source | file source
def build_errors(states,base,pad,dim,size):
    # address the numerical viscosity in swirling
    s = K.round(states+viscosity_adjustment)
    s = Reshape((dim+2*pad,dim+2*pad,1))(s)
    s = Cropping2D(((pad,pad),(pad,pad)))(s)
    s = K.reshape(s,[-1,size,base,size,base])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,size,size,1,base,base])
    s = K.tile   (s,[1, 1, 1, 2, 1, 1,]) # number of panels : 2

    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1,1,1,2,base,base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], size,size, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        x = K.reshape(x, [-1,size,size,2, base//3, 3, base//3, 3])
        x = K.mean(x, axis=(5,7))
        return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        # return K.round(x)
        ## do nothing
        # return x

    # s         = hash(s)
    # allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4,5))
    return error
Project: latplan    Author: guicho271828    | project source | file source
def build_errors(states,base,dim,size):
    s = K.reshape(states,[-1,size,base,size,base])
    s = K.permute_dimensions(s, [0,1,3,2,4])
    s = K.reshape(s,[-1,size,size,1,base,base])
    s = K.tile   (s,[1, 1, 1, 2, 1, 1,]) # number of panels : 2

    allpanels = K.variable(panels)
    allpanels = K.reshape(allpanels, [1,1,1,2,base,base])
    allpanels = K.tile(allpanels, [K.shape(s)[0], size,size, 1, 1, 1])

    def hash(x):
        ## 2x2 average hashing
        # x = K.reshape(x, [-1,size,size,2, base//2, 2, base//2, 2])
        # x = K.mean(x, axis=(5,7))
        # return K.round(x)
        ## diff hashing (horizontal diff)
        # x1 = x[:,:,:,:,:,:-1]
        # x2 = x[:,:,:,:,:,1:]
        # d = x1 - x2
        # return K.round(d)
        ## just rounding
        return K.round(x)
        ## do nothing
        # return x

    # s         = hash(s)
    # allpanels = hash(allpanels)

    # error = K.binary_crossentropy(s, allpanels)
    error = K.abs(s - allpanels)
    error = hash(error)
    error = K.mean(error, axis=(4,5))
    return error
Project: latplan    Author: guicho271828    | project source | file source
def generate1(config,disks,towers, **kwargs):
    l = len(config)
    tower_width  = disks * (2*disk_inc) + base_disk_width + border
    tower_height = disks*disk_height
    figure = np.ones([tower_height,
                      tower_width*towers],dtype=np.int8)
    state = config_state(config,disks,towers)
    for i, tower in enumerate(state):
        tower.reverse()
        # print(i,tower)
        x_center = tower_width *  i + disks * disk_inc # lacks base_disk_width
        for j,disk in enumerate(tower):
            # print(j,disk,(l-j)*2)
            figure[
                tower_height - disk_height * (j+1) :
                tower_height - disk_height * j,
                x_center - disk * disk_inc :
                x_center + disk * disk_inc + base_disk_width] \
                = 0
                # = np.tile(np.tile(patterns[disk],(tile_factor,tile_factor)),
                #           (1,2*disks+base_disk_width_factor))[:,:2 * disk * disk_inc + base_disk_width]
                # = np.tile(np.tile(patterns[disk],(tile_factor,tile_factor)),
                #           (1,disk+base_disk_width_factor))
                # = np.tile(np.tile(patterns[disk],(tile_factor,tile_factor)),
                #           (1,2*disk+base_disk_width_factor))
    return preprocess(figure)
Project: NTM-Keras    Author: SigmaQuan    | project source | file source
def get_constants(self, x):
        print("begin get_constants(self, x)")
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.controller_output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # if 0 < self.dropout_R < 1:
        #     input_shape = self.input_spec[0].shape
        #     input_dim = input_shape[-1]
        #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        #     ones = K.tile(ones, (1, int(input_dim)))
        #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
        #     constants.append(B_R)
        # else:
        #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        print("end get_constants(self, x)")
        return constants
Project: pmet    Author: bkj    | project source | file source
def lifted_loss(margin=1):
    """
      Lifted loss, per "Deep Metric Learning via Lifted Structured Feature Embedding" by Song et al
      Implemented in `keras`

      See also the `pytorch` implementation at: https://gist.github.com/bkj/565c5e145786cfd362cffdbd8c089cf4
    """
    def f(target, score):

        # Compute mask (-1 for different class, 1 for same class, 0 for diagonal)
        mask = (2 * K.equal(0, target - K.reshape(target, (-1, 1))) - 1)
        mask = (mask - K.eye(score.shape[0]))

        # Compute distance between rows
        mag  = (score ** 2).sum(axis=-1)
        mag  = K.tile(mag, (mag.shape[0], 1))
        dist = (mag + mag.T - 2 * score.dot(score.T))
        dist = K.sqrt(K.maximum(0, dist))

        # Negative component (points from different class should be far)
        l_n = K.sum((K.exp(margin - dist) * K.equal(mask, -1)), axis=-1)
        l_n = K.tile(l_n, (score.shape[0], 1))
        l_n = K.log(l_n + K.transpose(l_n))
        l_n = l_n * K.equal(mask, 1)

        # Positive component (points from same class should be close)
        l_p = dist * K.equal(mask, 1)

        loss  = K.sum((K.maximum(0, l_n + l_p) ** 2))
        n_pos = K.sum(K.equal(mask, 1))
        loss /= (2 * n_pos)

        return loss

    return f

# --
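
The distance block in lifted_loss relies on the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, with the squared row norms tiled into a square matrix so that all pairwise distances come out of a single matrix product. A NumPy check of the same trick (random data; the Theano-style .T/.dot/.sum calls map directly to their NumPy equivalents):

import numpy as np

score = np.random.rand(4, 3)
mag = (score ** 2).sum(axis=-1)          # (4,) squared row norms
mag = np.tile(mag, (mag.shape[0], 1))    # (4, 4) via tiling
dist = np.sqrt(np.maximum(0, mag + mag.T - 2 * score.dot(score.T)))
direct = np.linalg.norm(score[:, None] - score[None, :], axis=-1)
assert np.allclose(dist, direct)         # matches direct pairwise distances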
Project: EUNN-theano    Author: iguanaus    | project source | file source
def get_initial_states(self, x):
            initial_state = K.expand_dims(self.h0,dim=0) # (1, output_dim)
            initial_state = K.tile(initial_state, [x.shape[0], 1])  # (samples, output_dim)
            #initial_states = [initial_state for _ in range(len(self.states))]
            initial_states = [initial_state]
            return initial_states
Project: keras-attention    Author: datalogue    | project source | file source
def get_initial_state(self, inputs):
        print('inputs shape:', inputs.get_shape())

        # apply the matrix on the first time step to get the initial s0.
        s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))

        # from keras.layers.recurrent to initialize a vector of (batchsize,
        # output_dim)
        y0 = K.zeros_like(inputs)  # (samples, timesteps, input_dims)
        y0 = K.sum(y0, axis=(1, 2))  # (samples, )
        y0 = K.expand_dims(y0)  # (samples, 1)
        y0 = K.tile(y0, [1, self.output_dim])

        return [y0, s0]
Project: rna_protein_binding    Author: wentaozhu    | project source | file source
def mil_squared_error(y_true, y_pred):
    return K.tile(K.square(K.max(y_pred) - K.max(y_true)), 5)
Project: urnn    Author: stwisdom    | project source | file source
def get_initial_states(self, x):
            initial_state = K.expand_dims(self.h0,dim=0) # (1, output_dim)
            initial_state = K.tile(initial_state, [x.shape[0], 1])  # (samples, output_dim)
            #initial_states = [initial_state for _ in range(len(self.states))]
            initial_states = [initial_state]
            return initial_states
Project: extkeras    Author: andhus    | project source | file source
def get_attention_initial_state(self, inputs):
        [attention_tm1_state] = super(
            GravesSequenceAttention,
            self
        ).get_attention_initial_state(inputs)
        kappa_tm1 = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
        kappa_tm1 = K.sum(kappa_tm1, axis=(1, 2))  # (samples,)
        kappa_tm1 = K.expand_dims(kappa_tm1)  # (samples, 1)
        kappa_tm1 = K.tile(kappa_tm1, [1, self.distribution.n_components])  # (samples, n_components)

        return [attention_tm1_state, kappa_tm1]
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def get_initial_states(self, x):
        print("initial state building")
        # build an all-zero tensor of shape (samples, output_dim)
        initial_state = K.zeros_like(x)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_states=[]
        for dim in self.states_dim:
            initial_states.append(K.tile(initial_state, [1, dim]))  # (samples, output_dim)
        #initial_states = [initial_state for _ in range(len(self.states))]
        return initial_states
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def get_initial_states(self, x):
        print("initial state building")
        # build an all-zero tensor of shape (samples, output_dim)
        initial_state = K.zeros_like(x)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_state = K.tile(initial_state, [1, self.input_dim])

        initial_states = [initial_state for _ in range(len(self.states))]
        return initial_states
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def get_initial_states(self, x):
        initial_state = K.zeros_like(x)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        initial_states=[]
        for dim in self.states_dim:
            initial_states.append(K.tile(initial_state, [1, dim]))
        return initial_states
Project: PhasedLSTM-Keras    Author: fferroni    | project source | file source
def get_constants(self, inputs, training=None):
        constants = []
        if self.implementation == 0 and 0 < self.dropout < 1:
            input_shape = K.int_shape(inputs)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))

            def dropped_inputs():
                return K.dropout(ones, self.dropout)

            dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(4)]
            constants.append(dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(4)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: EUNN-theano    Author: iguanaus    | project source | file source
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            # initial states: all-zero tensor of shape (output_dim)
            self.states = [None]
        input_dim = input_shape[2]
        self.input_dim = input_dim

        self.W = self.init((input_dim, self.output_dim),
                           name='{}_W'.format(self.name))
        #self.b = K.zeros((self.N,), name='{}_b'.format(self.name))
        self.b = initializations.uniform((self.N,),scale=0.01,name='{}_b'.format(self.name))
        self.baug=K.tile(self.b,[2])

        h0 = self.h0_mean+initializations.uniform((2*self.N,),scale=0.01).get_value()
        self.h0 = K.variable(h0,name='{}_h0'.format(self.name))

        if ('full' in self.unitary_impl):   
            # we're using a full unitary recurrence matrix

            if (self.inner_init=='svd'):
                # use SVD to initialize U
                self.U = unitary_svd_init((self.N, self.N),name='{}_U'.format(self.name))
            elif (self.inner_init=='ASB2016'):
                # use parameterization of [ASB2016] to initialize U
                Uaug,_,_,_ = unitary_ASB2016_init((self.N,self.N))
                Uaug=Uaug.eval()
                self.U=K.variable(np.concatenate((Uaug[:self.N,:self.N],Uaug[:self.N,self.N:]),axis=0),name='{}_U'.format(self.name))

            self.Uaug=augRight(self.U,module=K)

        elif (self.unitary_impl=='ASB2016'):
            # we're using the parameterization of [Arjovsky, Shah, Bengio 2016]
            self.Uaug,self.theta,self.reflection,_ = unitary_ASB2016_init((self.N, self.N),name=self.name)

        # set the trainable weights
        if ('full' in self.unitary_impl):
            self.trainable_weights = [self.W, self.U, self.b, self.h0]
        elif (self.unitary_impl=='ASB2016'):
            self.trainable_weights = [self.W, self.theta, self.reflection, self.b, self.h0]

        self.regularizers = []
        #if self.W_regularizer:
        #    self.W_regularizer.set_param(self.W)
        #    self.regularizers.append(self.W_regularizer)
        #if self.U_regularizer:
        #    self.U_regularizer.set_param(self.U)
        #    self.regularizers.append(self.U_regularizer)
        #if self.b_regularizer:
        #    self.b_regularizer.set_param(self.b)
        #    self.regularizers.append(self.b_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: devise-keras    Author: priyamtejaswin    | project source | file source
def hinge_rank_loss(word_vectors, image_vectors, TESTING=False):
    """
    Custom hinge loss per (image, label) example - Page 4.
    word_vectors is y_true
    image_vectors is y_pred
    """
    slice_first = lambda x: x[0:1 , :]
    slice_but_first = lambda x: x[1:, :]

    # separate correct/wrong images
    correct_image = Lambda(slice_first, output_shape=(1, WORD_DIM))(image_vectors)
    wrong_images = Lambda(slice_but_first, output_shape=(INCORRECT_BATCH, WORD_DIM))(image_vectors)

    # separate correct/wrong words
    correct_word = Lambda(slice_first, output_shape=(1, WORD_DIM))(word_vectors)
    wrong_words = Lambda(slice_but_first, output_shape=(INCORRECT_BATCH, WORD_DIM))(word_vectors)

    # l2 norm
    l2 = lambda x: K.sqrt(K.sum(K.square(x), axis=1, keepdims=True))
    l2norm = lambda x: x/l2(x)

    # tiling to replicate correct_word and correct_image
    correct_words = K.tile(correct_word, (INCORRECT_BATCH,1))
    correct_images = K.tile(correct_image, (INCORRECT_BATCH,1))

    # converting to unit vectors
    correct_words = l2norm(correct_words)
    wrong_words = l2norm(wrong_words)
    correct_images = l2norm(correct_images)
    wrong_images = l2norm(wrong_images)

    # correct_image VS incorrect_words | Note the singular/plurals
    # cost_images = MARGIN - K.sum(correct_images * correct_words, 1) + K.sum(correct_images * wrong_words, 1) 
    # cost_images = K.maximum(cost_images, 0.0)

    # correct_word VS incorrect_images | Note the singular/plurals
    cost_words = MARGIN - K.sum(correct_words * correct_images, axis=1) + K.sum(correct_words * wrong_images, axis=1) 
    cost_words = K.maximum(cost_words, 0.0)

    # currently cost_words and cost_images are vectors - need to convert to scalar
    # cost_images = K.sum(cost_images, axis=-1)
    cost_words  = K.sum(cost_words, axis=-1)

    if TESTING:
        # ipdb.set_trace()
        assert K.eval(wrong_words).shape[0] == INCORRECT_BATCH
        assert K.eval(correct_words).shape[0] == INCORRECT_BATCH
        assert K.eval(wrong_images).shape[0] == INCORRECT_BATCH
        assert K.eval(correct_images).shape[0] == INCORRECT_BATCH
        assert K.eval(correct_words).shape==K.eval(correct_images).shape
        assert K.eval(wrong_words).shape==K.eval(wrong_images).shape
        assert K.eval(correct_words).shape==K.eval(wrong_images).shape

    # return cost_words + cost_images
    return cost_words/INCORRECT_BATCH
Project: ssd3Dbv    Author: pierluigiferrari    | project source | file source
def call(self, x, mask=None):
        '''
        Return an anchor box tensor based on the input tensor.

        The logic implemented here is identical to the logic in the module `ssd3Dbv_box_encode_decode_utils.py`.

        Note that this tensor does not participate in any graph computations at runtime. It is being created
        as a constant once for each classification conv layer during graph creation and is just being output
        along with the rest of the model output during runtime. Because of this, all logic is implemented
        as Numpy array operations and it is sufficient to convert the resulting Numpy array into a Keras tensor
        at the very end before outputting it.
        '''
        # Compute box lengths, widths and heights as fractions of the shorter image side
        size = min(self.img_height, self.img_width)
        lwh = size * self.this_anchor_lwhs # 2D array of shape `(n_boxes, lwh values)`

        # We need the shape of the input tensor
        if K.image_dim_ordering() == 'tf':
            batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape
        else: # Not yet relevant since TensorFlow is the only supported backend right now, but it can't harm to have this in here for the future
            batch_size, feature_map_channels, feature_map_height, feature_map_width = x._keras_shape

        # Compute the grid of box center points. They are identical for all lwh combinations
        cell_height = self.img_height / feature_map_height
        cell_width = self.img_width / feature_map_width
        cx = np.linspace(cell_width/2, self.img_width-cell_width/2, feature_map_width)
        cy = np.linspace(cell_height/2, self.img_height-cell_height/2, feature_map_height)
        cx_grid, cy_grid = np.meshgrid(cx, cy)
        cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile() to do what we want further down
        cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile() to do what we want further down

        # Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 6)`
        # where the last axis will contain `(cx, cy, cz, l, w, h)`
        boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 6))

        boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx
        boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy
        boxes_tensor[:, :, :, 2] = lwh[:, 2] / 2 # Set cz - all boxes are placed on the ground plane, so cz is just half of the box's height
        boxes_tensor[:, :, :, 3] = lwh[:, 0] # Set l
        boxes_tensor[:, :, :, 4] = lwh[:, 1] # Set w
        boxes_tensor[:, :, :, 5] = lwh[:, 2] # Set h

        # Converts coordinates from (cx, cy, cz, l, w, h) to (x1, x2, x3, x4, y1, y2, y3, y4, h),
        # the 'corner_points' format - where (xk, yk) are the coordinates of pk, the kth point of the
        # box ground plane and h is the height of the box. Note that p1 and p2 are the top left and right
        # points of the ground plane and p3 and p4 are the bottom left and right points.
        boxes_tensor2 = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 9))

        boxes_tensor2[:, :, :, [0,2]] = np.expand_dims(boxes_tensor[:, :, :, 0] - (boxes_tensor[:, :, :, 3] / 2), axis=-1) # cx - 0.5l == x1, x3
        boxes_tensor2[:, :, :, [1,3]] = np.expand_dims(boxes_tensor[:, :, :, 0] + (boxes_tensor[:, :, :, 3] / 2), axis=-1) # cx + 0.5l == x2, x4
        boxes_tensor2[:, :, :, [4,6]] = np.expand_dims(boxes_tensor[:, :, :, 1] - (boxes_tensor[:, :, :, 4] / 2), axis=-1) # cy - 0.5w == y1, y3
        boxes_tensor2[:, :, :, [5,7]] = np.expand_dims(boxes_tensor[:, :, :, 1] + (boxes_tensor[:, :, :, 4] / 2), axis=-1) # cy + 0.5w == y2, y4
        boxes_tensor2[:, :, :, 8] = boxes_tensor[:, :, :, 5] # h == h

        # Now prepend one dimension to `boxes_tensor2` to account for the batch size and tile it along that dimension.
        # The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 9)`
        boxes_tensor2 = np.expand_dims(boxes_tensor2, axis=0)
        boxes_tensor2 = K.tile(K.constant(boxes_tensor2, dtype='float32'), (K.shape(x)[0], 1, 1, 1, 1))

        return boxes_tensor2
Project: urnn    Author: stwisdom    | project source | file source
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            # initial states: all-zero tensor of shape (output_dim)
            self.states = [None]
        input_dim = input_shape[2]
        self.input_dim = input_dim

        self.W = self.init((input_dim, self.output_dim),
                           name='{}_W'.format(self.name))
        #self.b = K.zeros((self.N,), name='{}_b'.format(self.name))
        self.b = initializations.uniform((self.N,),scale=0.01,name='{}_b'.format(self.name))
        self.baug=K.tile(self.b,[2])

        h0 = self.h0_mean+initializations.uniform((2*self.N,),scale=0.01).get_value()
        self.h0 = K.variable(h0,name='{}_h0'.format(self.name))

        if ('full' in self.unitary_impl):   
            # we're using a full unitary recurrence matrix

            if (self.inner_init=='svd'):
                # use SVD to initialize U
                self.U = unitary_svd_init((self.N, self.N),name='{}_U'.format(self.name))
            elif (self.inner_init=='ASB2016'):
                # use parameterization of [ASB2016] to initialize U
                Uaug,_,_,_ = unitary_ASB2016_init((self.N,self.N))
                Uaug=Uaug.eval()
                self.U=K.variable(np.concatenate((Uaug[:self.N,:self.N],Uaug[:self.N,self.N:]),axis=0),name='{}_U'.format(self.name))

            self.Uaug=augRight(self.U,module=K)

        elif (self.unitary_impl=='ASB2016'):
            # we're using the parameterization of [Arjovsky, Shah, Bengio 2016]
            self.Uaug,self.theta,self.reflection,_ = unitary_ASB2016_init((self.N, self.N),name=self.name)

        # set the trainable weights
        if ('full' in self.unitary_impl):
            self.trainable_weights = [self.W, self.U, self.b, self.h0]
        elif (self.unitary_impl=='ASB2016'):
            self.trainable_weights = [self.W, self.theta, self.reflection, self.b, self.h0]

        self.regularizers = []
        #if self.W_regularizer:
        #    self.W_regularizer.set_param(self.W)
        #    self.regularizers.append(self.W_regularizer)
        #if self.U_regularizer:
        #    self.U_regularizer.set_param(self.U)
        #    self.regularizers.append(self.U_regularizer)
        #if self.b_regularizer:
        #    self.b_regularizer.set_param(self.b)
        #    self.regularizers.append(self.b_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: DIL    Author: FoxRow    | project source | file source
def yolo_head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.

    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.

    Returns
    -------
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])

    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the inner most iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.softmax(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims

    return box_xy, box_wh, box_confidence, box_class_probs
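
The conv_index construction above uses K.tile to enumerate every (y, x) cell offset of the feature map, which is then added to the sigmoid x/y predictions to place each box in its grid cell. A standalone check of just the index grid (a 2x3 grid; TensorFlow backend assumed):

from keras import backend as K

conv_dims = (2, 3)  # (height, width)
h_idx = K.tile(K.arange(0, stop=conv_dims[0]), [conv_dims[1]])  # [0 1 0 1 0 1]
w_idx = K.tile(K.expand_dims(K.arange(0, stop=conv_dims[1]), 0), [conv_dims[0], 1])
w_idx = K.flatten(K.transpose(w_idx))                           # [0 0 1 1 2 2]
conv_index = K.transpose(K.stack([h_idx, w_idx]))               # (6, 2) (y, x) pairs
print(K.eval(conv_index))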