Python keras.backend module: reshape() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use keras.backend.reshape().
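Before the project examples, here is a minimal sketch of the function itself (my own illustration, not taken from any project below; it assumes the TensorFlow backend). K.reshape behaves like np.reshape and tf.reshape, and a -1 entry leaves that dimension to be inferred:

import numpy as np
from keras import backend as K

x = K.variable(np.arange(12).reshape(3, 4))  # shape (3, 4)
y = K.reshape(x, (-1, 6))                    # -1 is inferred as 2
print(K.eval(y).shape)                       # (2, 6)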

Project: Keras-FCN    Author: theduynguyen
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
    y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

    # correctly classified
    clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
    equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped,axis=1)

    acc = correct_pixels_per_class / n_pixels_per_class
    acc_mask = tf.is_finite(acc)
    acc_masked = tf.boolean_mask(acc,acc_mask)

    return K.mean(acc_masked)
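A hypothetical usage sketch for the metric above (the model object is illustrative; the metric expects one-hot targets of shape (batch, h, w, classes) and requires the TensorFlow backend because of tf.stack and tf.is_finite):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[mean_acc])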
Project: onto-lstm    Author: pdasigi
def step(self, input_t, states):
        reader_states = states[:2]
        flattened_mem_tm1, flattened_shared_mem_tm1 = states[2:4]
        writer_h_tm1, writer_c_tm1 = states[4:]
        input_mem_shape = K.shape(flattened_mem_tm1)
        mem_shape = (input_mem_shape[0], input_mem_shape[1]/self.output_dim, self.output_dim)
        mem_tm1 = K.reshape(flattened_mem_tm1, mem_shape)
        shared_mem_tm1 = K.reshape(flattened_shared_mem_tm1, mem_shape)
        reader_constants = self.reader.get_constants(input_t)
        reader_states += reader_constants
        o_t, [_, reader_c_t] = self.reader.step(input_t, reader_states)
        z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
        shared_z_t, shared_m_rt = self.summarize_memory(o_t, shared_mem_tm1)
        c_t = self.compose_memory_and_output([o_t, m_rt, shared_m_rt])
        # Collecting the necessary variables to directly call writer's step function.
        writer_constants = self.writer.get_constants(c_t)  # returns dropouts for W and U (all 1s, see init)
        writer_states = [writer_h_tm1, writer_c_tm1] + writer_constants
        # Making a call to writer's step function, Equation 5
        h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states)  # h_t, writer_c_t: (batch_size, output_dim)
        mem_t = self.update_memory(z_t, h_t, mem_tm1)
        shared_mem_t = self.update_memory(shared_z_t, h_t, shared_mem_tm1)
        return h_t, [o_t, reader_c_t, K.batch_flatten(mem_t), K.batch_flatten(shared_mem_t), h_t, writer_c_t]
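The K.batch_flatten / K.reshape round trip above is the central trick: the memory is carried between recurrent steps as a flat 2-D state, so it is flattened on the way out of each step and reshaped back to 3-D on the way in. A standalone sketch of that round trip (shapes are illustrative, TensorFlow backend assumed):

import numpy as np
from keras import backend as K

mem = K.variable(np.random.rand(2, 3, 4))  # (batch_size, memory_size, output_dim)
flat = K.batch_flatten(mem)                # (2, 12): 2-D, safe to pass as a state
restored = K.reshape(flat, K.shape(mem))   # back to (2, 3, 4)
print(K.eval(restored).shape)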
Project: yoctol-keras-layer-zoo    Author: Yoctol
def step_with_training(self, training=None):

        def step(inputs, states):
            input_shape = K.int_shape(inputs)
            y_tm1 = self.layer.preprocess_input(
                K.expand_dims(states[0], axis=1),
                training
            )
            y_tm1 = K.reshape(y_tm1, (-1, input_shape[-1]))

            inputs_sum = tf.reduce_sum(inputs)

            def inputs_f(): return inputs
            def output_f(): return y_tm1
            current_inputs = tf.case(
                [(tf.equal(inputs_sum, 0.0), output_f)],
                default=inputs_f
            )

            return self.layer.step(
                current_inputs,
                states
            )

        return step
Project: yoctol-keras-layer-zoo    Author: Yoctol
def call(self, inputs, mask=None, initial_state=None, training=None):
        inputs_shape = K.shape(inputs)
        zeros = tf.zeros(
            shape=[
                inputs_shape[0],
                inputs_shape[1] - 1,
                self.layer.units
            ]
        )
        outputs = self.layer.call(
            inputs=inputs,
            mask=mask,
            initial_state=initial_state,
            training=training
        )
        outputs = K.reshape(
            tf.slice(outputs, [0, inputs_shape[1] - 1, 0], [-1, 1, -1]),
            shape=(inputs_shape[0], 1, self.layer.units)
        )
        outputs = K.concatenate([outputs, zeros], axis=1)

        if 0 < self.layer.dropout + self.layer.recurrent_dropout:
            outputs._uses_learning_phase = True
        return outputs
Project: yoctol-keras-layer-zoo    Author: Yoctol
def call(self, inputs, mask=None):
        input_shape = K.int_shape(inputs)
        outputs = self.layer.call(inputs)
        outputs = K.permute_dimensions(
            outputs,
            self.permute_pattern + [len(input_shape) - 1]
        )
        outputs_shape = self.compute_output_shape(input_shape)
        outputs = K.reshape(
            outputs,
            (-1, outputs_shape[1], outputs_shape[2])
        )

        mask_tensor = self.compute_mask(
            inputs,
            mask
        )
        mask_tensor = K.cast(mask_tensor, K.floatx())
        mask_tensor = K.expand_dims(mask_tensor)
        mask_output = K.repeat_elements(
            mask_tensor,
            outputs_shape[2],
            2
        )
        return outputs * mask_output
Project: SNLI-Keras    Author: adamzjk
def label_test_file(self):
    outfile = open("pred_vld.txt","w")
    prep_alfa = lambda X: pad_sequences(sequences=self.indexer.texts_to_sequences(X),
                                        maxlen=self.SentMaxLen)
    vld = json.loads(open('validation.json', 'r').read())
    for prem, hypo, label in zip(vld[0], vld[1], vld[2]):
      prem_pad, hypo_pad = prep_alfa([prem]), prep_alfa([hypo])
      ans = np.reshape(self.model.predict(x=[prem_pad, hypo_pad], batch_size = 1), -1)  # PREDICTION
      if np.argmax(ans) != label:
        outfile.write(prem + "\n" + hypo + "\n")
        outfile.write("Truth: " + self.rLabels[label] + "\n")
        outfile.write('Contradiction \t{:.1f}%\n'.format(float(ans[0]) * 100) +
                      'Neutral \t\t{:.1f}%\n'.format(float(ans[1]) * 100) +
                      'Entailment \t{:.1f}%\n'.format(float(ans[2]) * 100))
        outfile.write("-"*15 + "\n")
    outfile.close()
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants
Project: BiMPM_keras    Author: ijinmao
def _full_matching(self, h1, h2, w):
        """Full matching operation.

        # Arguments
            h1: (batch_size, h1_timesteps, embedding_size)
            h2: (batch_size, h2_timesteps, embedding_size)
            w: weights of one direction, (mp_dim, embedding_size)

        # Output shape
            (batch_size, h1_timesteps, mp_dim)
        """
        # h2 forward last step hidden vector, (batch_size, embedding_size)
        h2_last_state = h2[:, -1, :]
        # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
        h1 = self._time_distributed_multiply(h1, w)
        # h2_last_state * weights, (batch_size, mp_dim, embedding_size)
        h2 = self._time_distributed_multiply(h2_last_state, w)
        # reshape to (batch_size, 1, mp_dim, embedding_size)
        h2 = K.expand_dims(h2, axis=1)
        # matching vector, (batch_size, h1_timesteps, mp_dim)
        matching = self._cosine_similarity(h1, h2)
        return matching
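A standalone sketch of the broadcast that makes this work (assumption: _cosine_similarity is a plain L2-normalized dot product over the embedding axis; the helper below is illustrative, not the project's code). Expanding h2 to (batch_size, 1, mp_dim, embedding_size) lets it broadcast against every h1 timestep:

import numpy as np
from keras import backend as K

h1 = K.variable(np.random.rand(2, 7, 5, 3))  # (batch, h1_timesteps, mp_dim, emb)
h2 = K.variable(np.random.rand(2, 1, 5, 3))  # broadcasts over h1_timesteps

def cosine_similarity(a, b):
    num = K.sum(a * b, axis=-1)
    den = K.sqrt(K.sum(K.square(a), axis=-1) * K.sum(K.square(b), axis=-1))
    return num / (den + K.epsilon())

print(K.eval(cosine_similarity(h1, h2)).shape)  # (2, 7, 5)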
Project: BiMPM_keras    Author: ijinmao
def _max_pooling_matching(self, h1, h2, w):
        """Max pooling matching operation.

        # Arguments
            h1: (batch_size, h1_timesteps, embedding_size)
            h2: (batch_size, h2_timesteps, embedding_size)
            w: weights of one direction, (mp_dim, embedding_size)

        # Output shape
            (batch_size, h1_timesteps, mp_dim)
        """
        # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
        h1 = self._time_distributed_multiply(h1, w)
        # h2 * weights, (batch_size, h2_timesteps, mp_dim, embedding_size)
        h2 = self._time_distributed_multiply(h2, w)
        # reshape v1 to (batch_size, h1_timesteps, 1, mp_dim, embedding_size)
        h1 = K.expand_dims(h1, axis=2)
        # reshape v1 to (batch_size, 1, h2_timesteps, mp_dim, embedding_size)
        h2 = K.expand_dims(h2, axis=1)
        # cosine similarity, (batch_size, h1_timesteps, h2_timesteps, mp_dim)
        cos = self._cosine_similarity(h1, h2)
        # (batch_size, h1_timesteps, mp_dim)
        matching = K.max(cos, axis=2)
        return matching
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu
def call(self, x,mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu
def call(self, x,mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: latplan    Author: guicho271828
def to_configs(states, verbose=True, **kwargs):
    base = setting['base']
    width  = states.shape[1] // base
    height = states.shape[1] // base
    load(width,height)

    def build():
        P = len(setting['panels'])
        states = Input(shape=(height*base,width*base))
        error = build_error(states, height, width, base)

        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        # a, h, w, panel
        matches = K.reshape(matches, [K.shape(states)[0], height * width, -1])
        # a, pos, panel
        matches = K.permute_dimensions(matches, [0,2,1])
        # a, panel, pos
        config = matches * K.arange(height*width,dtype='float')
        config = K.sum(config, axis=-1)
        return Model(states, wrap(states, config))

    model = build()
    return model.predict(states, **kwargs)
Project: latplan    Author: guicho271828
def tensor_swirl(image, center=None, strength=1, radius=100, rotation=0, cval=0.0, **kwargs):
    # **kwargs is for unsupported options (ignored)
    cval = tf.fill(K.shape(image)[0:1], cval)
    shape = K.int_shape(image)[1:3]
    if center is None:
        center = np.array(shape) / 2
    ys = np.expand_dims(np.repeat(np.arange(shape[0]), shape[1]),-1)
    xs = np.expand_dims(np.tile  (np.arange(shape[1]), shape[0]),-1)
    map_xs, map_ys = swirl_mapping(xs, ys, center, rotation, strength, radius)

    mapping = np.zeros((*shape, *shape))
    for map_x, map_y, x, y in zip(map_xs, map_ys, xs, ys):
        results = tensor_linear_interpolation(image, map_x, map_y, cval)
        for _y, _x, w in results:
            # mapping[int(y),int(x),int(_y),int(_x),] = w
            mapping[int(_y),int(_x),int(y),int(x),] = w


    results = tf.tensordot(image, K.variable(mapping), [[1,2],[0,1]])
    # results = K.reshape(results, K.shape(image))
    return results
Project: latplan    Author: guicho271828
def generate_gpu(configs,**kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base*size

    def build():
        P = 2
        configs = Input(shape=(size*size,))
        _configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1,P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base*base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size*base, size*base, 1])
        states = K.spatial_2d_padding(states, padding=((pad,pad),(pad,pad)))
        states = K.squeeze(states, -1)
        return Model(configs, wrap(configs, states))

    return preprocess(batch_swirl(build().predict(configs,**kwargs)))
Project: latplan    Author: guicho271828
def to_configs(states, verbose=True, **kwargs):
    base = panels.shape[1]
    dim  = states.shape[1] - pad*2
    size = dim // base

    def build():
        states = Input(shape=(dim+2*pad,dim+2*pad))
        s = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **unswirl_args)
        error = build_errors(s,base,pad,dim,size)
        matches = 1 - K.clip(K.sign(error - threshold),0,1)
        # a, h, w, panel
        matches = K.reshape(matches, [K.shape(states)[0], size * size, -1])
        # a, pos, panel
        config = matches * K.arange(2,dtype='float')
        config = K.sum(config, axis=-1)
        # this is 0,1 configs; for compatibility, we need -1 and 1
        config = - (config - 0.5)*2
        return Model(states, wrap(states, K.round(config)))

    return build().predict(states, **kwargs)
Project: latplan    Author: guicho271828
def generate_cpu(configs, **kwargs):
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base*size
    def generate(config):
        figure = np.zeros((dim,dim))
        for pos,value in enumerate(config):
            x = pos % size
            y = pos // size
            if value > 0:
                figure[y*base:(y+1)*base,
                       x*base:(x+1)*base] = panels[0]
            else:
                figure[y*base:(y+1)*base,
                       x*base:(x+1)*base] = panels[1]
        return preprocess(figure)
    return np.array([ generate(c) for c in configs ]).reshape((-1,dim,dim))
Project: latplan    Author: guicho271828
def generate_gpu(configs, **kwargs):
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base*size

    def build():
        P = 2
        configs = Input(shape=(size*size,))
        _configs = 1 - K.round((configs/2)+0.5) # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs,'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1,P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base*base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size*base, size*base])
        return Model(configs, wrap(configs, states))

    return build().predict(np.array(configs),**kwargs)
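The reshape-permute-reshape sequence in both generate_gpu variants is a standard tiling idiom: after the permute, each panel's pixel rows sit next to the board rows they belong to, so the final reshape assembles a seamless image. A NumPy sketch of the same idiom without the batch axis (illustrative shapes):

import numpy as np

tiles = np.arange(2 * 2 * 3 * 3).reshape(2, 2, 3, 3)  # (rows, cols, base, base)
board = tiles.transpose(0, 2, 1, 3).reshape(6, 6)     # (rows*base, cols*base)
print(board.shape)  # without the transpose, tile rows would interleave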
Project: keras    Author: GeekLiB
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: State-Frequency-Memory-stock-prediction    Author: z331565360
def get_initial_states(self, x):

        init_state_h = K.zeros_like(x)
        init_state_h = K.sum(init_state_h, axis = 1)
        reducer_s = K.zeros((self.input_dim, self.hidden_dim))
        reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
        reducer_p = K.zeros((self.hidden_dim, self.output_dim))
        init_state_h = K.dot(init_state_h, reducer_s)

        init_state_p = K.dot(init_state_h, reducer_p)

        init_state = K.zeros_like(init_state_h)
        init_freq = K.dot(init_state_h, reducer_f)

        init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
        init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))

        init_state_S_re = init_state * init_freq
        init_state_S_im = init_state * init_freq

        init_state_time = K.cast_to_floatx(0.)

        initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
        return initial_states
Project: ppap    Author: unique-horn
def setup_output(self, x):
    """
    Setup output tensor

    """
    x_max = K.max(x, axis=1)

    x_max = K.flatten(x_max)
    z = K.dot(x_max, self.w_proj_to_z) #+ self.b_proj_to_z
    hidden = K.dot(z, self.weights[0]) + self.biases[0]
    hidden = K.reshape(hidden, shape=(self.input_channels,
                                      self.hidden_dim))

    output = K.dot(hidden, self.weights[1]) + self.biases[1]

    self.output = K.reshape(output, (self.num_filters, self.input_channels,
                                     *self.output_shape))
    return self.output
Project: importance-sampling    Author: idiap
def call(self, x, mask=None):
        # x should be an output and a target
        assert len(x) == 2

        losses = _per_sample_loss(self.loss, mask, x)
        if self.fast:
            grads = K.sqrt(sum([
                K.sum(K.square(g), axis=1)
                for g in K.gradients(losses, self.parameter_list)
            ]))
        else:
            nb_samples = K.shape(losses)[0]
            grads = K.map_fn(
                lambda i: self._grad_norm(losses[i]),
                K.arange(0, nb_samples),
                dtype=K.floatx()
            )

        return K.reshape(grads, (-1, 1))
Project: kfs    Author: the-moliver
def call(self, x, mask=None):
        conv_out = K.conv2d(x, self.W, strides=self.strides,
                            padding=self.padding,
                            data_format=self.data_format,
                            filter_shape=self.kernel_shape)

        if self.data_format == 'channels_first':
            # Complex-cell filter operation
            conv_out1 = K.sqrt(K.square(conv_out[:, :self.filters_complex, :, :]) + K.square(conv_out[:, self.filters_complex:2*self.filters_complex, :, :]) + K.epsilon())
            # Simple-cell filter operation
            conv_out2 = K.concatenate([conv_out1, conv_out[:, 2*self.filters_complex:, :, :]], axis=1)
        elif self.data_format == 'channels_last':
            # Complex-cell filter operation
            conv_out1 = K.sqrt(K.square(conv_out[:, :, :, :self.filters_complex]) + K.square(conv_out[:, :, :, self.filters_complex:2*self.filters_complex]) + K.epsilon())
            # Simple-cell filter operation
            conv_out2 = K.concatenate([conv_out1, conv_out[:, :, :, 2*self.filters_complex:]], axis=3)

        if self.bias:
            if self.data_format == 'channels_first':
                conv_out2 += K.reshape(self.b, (1, self.filters_complex + self.filters_simple, 1, 1))
            elif self.data_format == 'channels_last':
                conv_out2 += K.reshape(self.b, (1, 1, 1, self.filters_complex + self.filters_simple))

        return self.activation(conv_out2)
Project: kfs    Author: the-moliver
def call(self, inputs):
        stim = inputs[0]
        center = inputs[1]
        centers_x = self.XX[None, :, :, None] - center[:, 0, None, None, None] - self.centers[0][None, None, None, :]
        centers_y = self.YY[None, :, :, None] - center[:, 1, None, None, None] - self.centers[1][None, None, None, :]
        senv = self.stds[None, None, None, :]
        gauss = self.gauss_scale * (K.square(self.dx) / (2 * np.pi * K.square(senv) + K.epsilon()))*K.exp(-(K.square(centers_x) + K.square(centers_y))/(2.0 * K.square(senv)))
        # gauss = (1 / K.sqrt(2 * np.pi * K.square(senv) + K.epsilon()))*K.exp(-(K.square(centers_x) + K.square(centers_y))/(2.0 * K.square(senv)))
        # gauss /= K.max(gauss, axis=(1, 2), keepdims=True)
        gauss = K.reshape(gauss, self.kernel_shape)

        if K.backend() == 'theano':
            output = K.sum(stim[..., None] * K.pattern_broadcast(gauss, self.kernel_broadcast), axis=self.filter_axes, keepdims=False)
        else:
            output = K.sum(stim[..., None] * gauss, axis=self.filter_axes, keepdims=False)

        return output
Project: DeepMoji    Author: bfelbo
def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result
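The reshape here squeezes the trailing singleton produced by K.dot with a (d, 1) weight, so the softmax runs over timesteps. A small sketch of that squeeze (illustrative shapes, TensorFlow backend assumed):

import numpy as np
from keras import backend as K

x = K.variable(np.random.rand(2, 5, 4))  # (batch, timesteps, d)
W = K.variable(np.random.rand(4, 1))
logits = K.dot(x, W)                     # (2, 5, 1)
x_shape = K.shape(x)
squeezed = K.reshape(logits, (x_shape[0], x_shape[1]))
print(K.eval(squeezed).shape)            # (2, 5)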
Project: nli_generation    Author: jstarc
def call(self, X, mask=None):

        input_shape = self.input_spec[0].shape

        x = K.reshape(X[0], (-1, input_shape[2]))
        target = X[1].flatten() if self.trainable else None

        Y = h_softmax(x, K.shape(x)[0], self.output_dim, 
                              self.n_classes, self.n_outputs_per_class,
                              self.W1, self.b1, self.W2, self.b2, target)

        output_dim = 1 if self.trainable else self.output_dim    
        input_length = K.shape(X[0])[1]

        y = K.reshape(Y, (-1, input_length, output_dim))
        return y
Project: Keras-FCN    Author: theduynguyen
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
    y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

    # correctly classified
    clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
    equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped,axis=1) + K.sum(y_pred_reshaped,axis=1)

    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)
    iou_masked = tf.boolean_mask(iou,iou_mask)

    return K.mean( iou_masked )
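The is_finite/boolean_mask pair at the end drops classes whose union is zero in the batch (0/0 produces NaN) before averaging. A minimal TF 1.x sketch of that masking (values are illustrative):

import numpy as np
import tensorflow as tf

iou = tf.constant([0.5, np.nan, 0.75])  # NaN where a class is absent
masked = tf.boolean_mask(iou, tf.is_finite(iou))
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(masked)))  # 0.625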
Project: EUNN-theano    Author: iguanaus
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.))
        return constants
Project: Neural-Style-Transfer-Windows    Author: titu1994
def eval_loss_and_grads(self, x):
        x = x.reshape(self.output_shape)
        f_inputs = [x]
        # update the patch indexes
        start_t = time.time()
        for nnf in self.feature_nnfs:
            nnf.update(x, num_steps=self.args.mrf_nnf_steps)
            new_target = nnf.matcher.get_reconstruction()
            f_inputs.append(new_target)
        print('PatchMatch update in {:.2f} seconds'.format(time.time() - start_t))
        # run it through
        outs = self.f_outputs(f_inputs)
        loss_value = outs[0]
        if len(outs[1:]) == 1:
            grad_values = outs[1].flatten().astype('float64')
        else:
            grad_values = np.array(outs[1:]).flatten().astype('float64')
        return loss_value, grad_values
Project: Neural-Style-Transfer-Windows    Author: titu1994
def make_patches_grid(x, patch_size, patch_stride):
    '''Break image `x` up into a grid of patches.

    input shape: (channels, rows, cols)
    output shape: (rows, cols, channels, patch_rows, patch_cols)
    '''
    from theano.tensor.nnet.neighbours import images2neibs  # TODO: all K, no T
    x = K.expand_dims(x, 0)
    xs = K.shape(x)
    num_rows = 1 + (xs[-2] - patch_size) // patch_stride
    num_cols = 1 + (xs[-1] - patch_size) // patch_stride
    num_channels = xs[-3]
    patches = images2neibs(x,
        (patch_size, patch_size), (patch_stride, patch_stride),
        mode='valid')
    # neibs are sorted per-channel
    patches = K.reshape(patches, (num_channels, K.shape(patches)[0] // num_channels, patch_size, patch_size))
    patches = K.permute_dimensions(patches, (1, 0, 2, 3))
    # arrange in a 2d-grid (rows, cols, channels, px, py)
    patches = K.reshape(patches, (num_rows, num_cols, num_channels, patch_size, patch_size))
    patches_norm = K.sqrt(K.sum(K.square(patches), axis=(2,3,4), keepdims=True))
    return patches, patches_norm


# get tensor representations of our images
Project: wavenet    Author: basveeling
def make_soft(y_true, fragment_length, nb_output_bins, train_with_soft_target_stdev, with_prints=False):
    receptive_field, _ = compute_receptive_field()
    n_outputs = fragment_length - receptive_field + 1

    # Make a gaussian kernel.
    kernel_v = scipy.signal.gaussian(9, std=train_with_soft_target_stdev)
    print(kernel_v)
    kernel_v = np.reshape(kernel_v, [1, 1, -1, 1])
    kernel = K.variable(kernel_v)

    if with_prints:
        y_true = print_t(y_true, 'y_true initial')

    # y_true: [batch, timesteps, input_dim]
    y_true = K.reshape(y_true, (-1, 1, nb_output_bins, 1))  # Same filter for all output; combine with batch.
    # y_true: [batch*timesteps, n_channels=1, input_dim, dummy]
    y_true = K.conv2d(y_true, kernel, border_mode='same')
    y_true = K.reshape(y_true, (-1, n_outputs, nb_output_bins))  # Same filter for all output; combine with batch.
    # y_true: [batch, timesteps, input_dim]
    y_true /= K.sum(y_true, axis=-1, keepdims=True)

    if with_prints:
        y_true = print_t(y_true, 'y_true after')
    return y_true
Project: Parametric-t-SNE-in-Keras    Author: zaburo-ch
def x2p(X):
    tol = 1e-5
    n = X.shape[0]
    logU = np.log(perplexity)

    sum_X = np.sum(np.square(X), axis=1)
    D = sum_X + (sum_X.reshape([-1, 1]) - 2 * np.dot(X, X.T))

    idx = (1 - np.eye(n)).astype(bool)
    D = D[idx].reshape([n, -1])

    def generator():
        for i in xrange(n):
            yield i, D[i], tol, logU

    pool = mp.Pool(n_jobs)
    result = pool.map(x2p_job, generator())
    P = np.zeros([n, n])
    for i, thisP in result:
        P[i, idx[i]] = thisP

    return P
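The D computation above uses the standard expansion ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2*xi.xj, with reshape([-1, 1]) turning one squared-norm vector into a column so broadcasting produces the full pairwise matrix. A quick NumPy check of the identity:

import numpy as np

X = np.random.rand(5, 3)
sum_X = np.sum(np.square(X), axis=1)
D = sum_X + (sum_X.reshape([-1, 1]) - 2 * np.dot(X, X.T))
D_ref = np.square(X[:, None, :] - X[None, :, :]).sum(-1)
print(np.allclose(D, D_ref))  # True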
Project: keras-customized    Author: ambrite
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: leaf-classification    Author: MWransky
def call(self, x, mask=None):
        # compute the candidate hidden state
        transform = K.conv2d(x, self.W, strides=self.subsample,
                          border_mode=self.border_mode,
                          dim_ordering=self.dim_ordering,
                          filter_shape=self.W_shape)
        if self.bias:
            transform += K.reshape(self.b, (1, 1, 1, self.nb_filter))
        transform = self.activation(transform)

        transform_gate = K.conv2d(x, self.W_gate, strides=self.subsample,
                          border_mode=self.border_mode,
                          dim_ordering=self.dim_ordering,
                          filter_shape=self.W_shape)
        if self.bias:
            transform_gate += K.reshape(self.b_gate, (1, 1, 1, self.nb_filter))
        transform_gate = K.sigmoid(transform_gate)
        carry_gate = 1.0 - transform_gate

        return transform * transform_gate + x * carry_gate

    # Define get_config method so load_from_json can run
Project: rna_protein_binding    Author: wentaozhu
def call(self, x,mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: rna_protein_binding    Author: wentaozhu
def call(self, x,mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: neural-turkish-morphological-disambiguator    Author: onurgu
def call(self, inputs, mask=None):
        # In case the target shape is not fully defined,
        # we need access to the shape of x.
        # solution:
        # 1) rely on x._keras_shape
        # 2) fallback: K.int_shape
        target_shape = self.target_shape
        target_mask_shape = self.target_mask_shape
        if -1 in target_shape:
            # target shape not fully defined
            input_shape = None
            try:
                input_shape = K.int_shape(inputs)
            except TypeError:
                pass
            if input_shape is not None:
                target_shape = self.compute_output_shape(input_shape)[1:]

        _result = K.reshape(inputs, (-1,) + target_shape)
        reshaped_mask = K.reshape(mask, (-1,) + target_mask_shape + (1,))
        result = _result * K.cast(reshaped_mask, K.floatx())
        return result
Project: keras-prednet    Author: kunimasa-kawasaki
def conv_step(self, x, W, b=None, border_mode="valid"):
        input_shape = self.input_spec[0].shape

        conv_out = K.conv2d(x, W, strides=self.subsample,
                            border_mode=border_mode,
                            dim_ordering=self.dim_ordering,
                            image_shape=(input_shape[0],
                                         input_shape[2],
                                         input_shape[3],
                                         input_shape[4]),
                            filter_shape=self.W_shape)
        if b:
            if self.dim_ordering == 'th':
                conv_out = conv_out + K.reshape(b, (1, self.nb_filter, 1, 1))
            elif self.dim_ordering == 'tf':
                conv_out = conv_out + K.reshape(b, (1, 1, 1, self.nb_filter))
            else:
                raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        return conv_out
Project: keras-prednet    Author: kunimasa-kawasaki
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants
Project: keras-convautoencoder    Author: nanopony
def get_output(self, train=False):
        X = self.get_input(train)
        conv_out = deconv2d_fast(X, self.W,
                                 strides=self.subsample,
                                 border_mode=self.border_mode,
                                 dim_ordering=self.dim_ordering,
                                 image_shape=self.input_shape,
                                 filter_shape=self.W_shape)
        if self.dim_ordering == 'th':
            output = conv_out + K.reshape(self.b, (1, self.nb_out_channels, 1, 1))
        elif self.dim_ordering == 'tf':
            output = conv_out + K.reshape(self.b, (1, 1, 1, self.nb_out_channels))
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        output = self.activation(output)
        return output
Project: image-analogies    Author: awentzonline
def find_analogy_patches(a, a_prime, b, patch_size=3, patch_stride=1):
    '''This is for precalculating the analogy_loss

    Since A, A', and B never change we only need to calculate the patch matches once.
    '''
    # extract patches from feature maps
    a_patches, a_patches_norm = patches.make_patches(K.variable(a), patch_size, patch_stride)
    a_prime_patches, a_prime_patches_norm = patches.make_patches(K.variable(a_prime), patch_size, patch_stride)
    b_patches, b_patches_norm = patches.make_patches(K.variable(b), patch_size, patch_stride)
    # find best patches and calculate loss
    p = patches.find_patch_matches(b_patches, b_patches_norm, a_patches / a_patches_norm)
    #best_patches = a_prime_patches[p]
    best_patches = K.reshape(a_prime_patches[p], K.shape(b_patches))
    f = K.function([], best_patches)
    best_patches = f([])
    return best_patches
Project: image-analogies    Author: awentzonline
def make_patches_grid(x, patch_size, patch_stride):
    '''Break image `x` up into a grid of patches.

    input shape: (channels, rows, cols)
    output shape: (rows, cols, channels, patch_rows, patch_cols)
    '''
    from theano.tensor.nnet.neighbours import images2neibs  # TODO: all K, no T
    x = K.expand_dims(x, 0)
    xs = K.shape(x)
    num_rows = 1 + (xs[-2] - patch_size) // patch_stride
    num_cols = 1 + (xs[-1] - patch_size) // patch_stride
    num_channels = xs[-3]
    patches = images2neibs(x,
        (patch_size, patch_size), (patch_stride, patch_stride),
        mode='valid')
    # neibs are sorted per-channel
    patches = K.reshape(patches, (num_channels, K.shape(patches)[0] // num_channels, patch_size, patch_size))
    patches = K.permute_dimensions(patches, (1, 0, 2, 3))
    # arrange in a 2d-grid (rows, cols, channels, px, py)
    patches = K.reshape(patches, (num_rows, num_cols, num_channels, patch_size, patch_size))
    patches_norm = K.sqrt(K.sum(K.square(patches), axis=(2,3,4), keepdims=True))
    return patches, patches_norm
Project: Keras-GAN    Author: Shaofanl
def register(self, info_tensor, param_tensor):
        self.info_tensor = info_tensor #(128,1)

        if self.stddev_fix:
            self.param_tensor = param_tensor

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
            std  = 1.0
        else:
            self.param_tensor = param_tensor # 2 

            mean = K.clip(param_tensor[:, 0].dimshuffle(0, 'x'), self.min, self.max) 
          # std  = K.maximum( param_tensor[:, 1].dimshuffle(0, 'x'), 0)
            std  = K.sigmoid( param_tensor[:, 1].dimshuffle(0, 'x') )

        e = (info_tensor-mean)/(std + K.epsilon())
        self.log_Q_c_given_x = \
            K.sum(-0.5*np.log(2*np.pi) -K.log(std+K.epsilon()) -0.5*(e**2), axis=1) * self.lmbd

#       m = Sequential([ Activation('softmax', input_shape=(self.n,)), Lambda(lambda x: K.log(x), lambda x: x) ])
        return K.reshape(self.log_Q_c_given_x, (-1, 1))