Python keras.backend module: constant() example source code

The following 26 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.constant().
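
Before the project examples, here is a minimal self-contained sketch of the basic behaviour (assuming Keras 2.x with the TensorFlow backend): K.constant() wraps a Python scalar or NumPy array in a fixed backend tensor that other backend ops can consume, and K.eval() turns it back into a NumPy array.

import numpy as np
from keras import backend as K

# a scalar broadcast to an explicit shape
c1 = K.constant(9, shape=(3, 4))
# a NumPy array with an explicit dtype
c2 = K.constant(np.arange(6).reshape(2, 3), dtype='float32')

print(K.eval(c1))   # 3x4 array filled with 9.0
print(K.eval(c2))   # [[0. 1. 2.] [3. 4. 5.]]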

Project: deeppavlov    Author: deepmipt
def update(self, batch):
        x, y = batch
        y = np.array(y)
        y_pred = None

        if self.model_type == 'nn':
            self.train_loss, self.train_acc = self.model.train_on_batch(x, y)
            y_pred = self.model.predict_on_batch(x).reshape(-1)
            self.train_auc = roc_auc_score(y, y_pred)

        if self.model_type == 'ngrams':
            x = vectorize_select_from_data(x, self.vectorizers, self.selectors)
            self.model.fit(x, y.reshape(-1))
            y_pred = np.array(self.model.predict_proba(x)[:,1]).reshape(-1)
            y_pred_tensor = K.constant(y_pred, dtype='float64')
            self.train_loss = K.eval(binary_crossentropy(y.astype('float'), y_pred_tensor))
            self.train_acc = K.eval(binary_accuracy(y.astype('float'), y_pred_tensor))
            self.train_auc = roc_auc_score(y, y_pred)
        self.updates += 1
        return y_pred
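
A stripped-down version of the 'ngrams' branch above (the toy labels and predictions are hypothetical; assuming Keras 2.x): wrapping the NumPy arrays in K.constant lets the Keras loss and metric functions be evaluated outside any compiled model.

import numpy as np
from keras import backend as K
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy

y = np.array([1., 0., 1., 1.])
y_pred = np.array([0.9, 0.2, 0.7, 0.4])

y_true_tensor = K.constant(y)
y_pred_tensor = K.constant(y_pred)
train_loss = K.eval(binary_crossentropy(y_true_tensor, y_pred_tensor))
train_acc = K.eval(binary_accuracy(y_true_tensor, y_pred_tensor))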
Project: deeppavlov    Author: deepmipt
def create_attention_layer(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="attention_generator")
        return model
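
The K.constant selector above deserves a note: val is a (max_sequence_length, 1) column that is all zeros except for a one in the last row, so K.dot with the permuted sequence picks out the final timestep. The same selector recurs in the forward/backward variants and in terminal_f/terminal_b below. A small standalone sketch of just that trick (toy sizes, assuming the TensorFlow backend):

import numpy as np
from keras import backend as K

T, H = 5, 3                                    # toy sequence length and hidden size
seq = K.constant(np.random.rand(2, T, H))      # (batch, timesteps, hidden)

val = np.concatenate((np.zeros((T - 1, 1)), np.ones((1, 1))), axis=0)
kcon = K.constant(value=val, dtype='float32')  # (T, 1) one-hot selector for the last step

seq_perm = K.permute_dimensions(seq, (0, 2, 1))                      # (batch, hidden, T)
last_state = K.permute_dimensions(K.dot(seq_perm, kcon), (0, 2, 1))  # (batch, 1, hidden)
print(K.eval(last_state).shape)                # (2, 1, 3)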
Project: deeppavlov    Author: deepmipt
def create_attention_layer_f(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_forw")
        return model
Project: deeppavlov    Author: deepmipt
def create_attention_layer_b(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.ones((1,1)), np.zeros((self.max_sequence_length-1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_back")
        return model
Project: kfs    Author: the-moliver
def __init__(self, initial_power=1, axis=-1, **kwargs):
        self.alpha_pos_initializer = initializers.constant(1.)
        self.alpha_neg_initializer = initializers.constant(1.)
        self.beta_pos_initializer = initializers.constant(0.)
        self.beta_neg_initializer = initializers.constant(0.)
        self.rho_pos_initializer = initializers.constant(initial_power)
        self.rho_neg_initializer = initializers.constant(initial_power)

        self.alpha_pos_constraint = None
        self.alpha_neg_constraint = None
        self.beta_pos_constraint = None
        self.beta_neg_constraint = None
        self.rho_pos_constraint = None
        self.rho_neg_constraint = None

        self.alpha_pos_regularizer = None
        self.alpha_neg_regularizer = None
        self.beta_pos_regularizer = None
        self.beta_neg_regularizer = None
        self.rho_pos_regularizer = None
        self.rho_neg_regularizer = None

        self.axis = axis
        super(PowerPReLU, self).__init__(**kwargs)
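
Note that initializers.constant used here is keras.initializers.Constant, a weight initializer, not the backend tensor returned by keras.backend.constant (although the initializer builds its values with K.constant under the hood). A short sketch of the difference, with hypothetical values:

from keras import backend as K
from keras import initializers

init = initializers.constant(0.5)        # an Initializer: call it with a shape
print(K.eval(init((2, 3))))              # 2x3 array of 0.5, used to fill a new weight

tensor = K.constant(0.5, shape=(2, 3))   # a fixed tensor, not a trainable weight
print(K.eval(tensor))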
Project: LIE    Author: EmbraceLife
def risk_estimation(y_true, y_pred):
    return -100. * K.mean((y_true - 0.0002) * y_pred)

######################
# my custom buy_hold_sell activation function
#####################
# from keras_step_function import tf_stepy
#
# def buy_hold_sell(x):
#   return tf_stepy(x)
#
# get_custom_objects().update({'custom_activation': Activation(buy_hold_sell)})


#######################
# classification style
# to work with y_pred as [buy, half, sell]
#######################
# def risk_estimation_bhs(y_true, y_pred):
    # return -100 * K.mean((y_true - 0.0002) * K.constant([1.0, 0.75, 0.5, 0.25, 0.0]) * y_pred) # -0.0002 is removed from original
    # return -100 * K.mean((y_true - 0.0002) * K.constant([1.0, 0.5, 0.0]) * y_pred) # -0.0002 is removed from original
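
A runnable sketch of the idea in the commented-out risk_estimation_bhs lines (the three-element weighting is hypothetical, not the project's final loss): K.constant turns the fixed per-class position weights into a tensor that broadcasts across the batch.

import numpy as np
from keras import backend as K

def risk_estimation_bhs(y_true, y_pred):
    positions = K.constant([1.0, 0.5, 0.0])   # capital position per class [buy, hold, sell]
    return -100. * K.mean((y_true - 0.0002) * positions * y_pred)

# quick check on constant inputs
y_true = K.constant(np.random.rand(4, 1))     # price change per sample
y_pred = K.constant(np.random.rand(4, 3))     # predicted class scores
print(K.eval(risk_estimation_bhs(y_true, y_pred)))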
Project: extkeras    Author: andhus
def _get_attention_and_kappa(self, attended, params, kappa_tm1):
        """
        # Args
            params: the params of this distribution
            attended: the attended sequence (samples, timesteps, features)
        # Returns
            attention tensor (samples, features)
        """
        att_idx = K.constant(np.arange(self.attended_shape[1])[None, :, None])
        alpha, beta, kappa_diff = self.distribution.split_param_types(params)
        kappa = kappa_diff + kappa_tm1

        kappa_ = K.expand_dims(kappa, 1)
        beta_ = K.expand_dims(beta, 1)
        alpha_ = K.expand_dims(alpha, 1)

        attention_w = K.sum(
            alpha_ * K.exp(- beta_ * K.square(kappa_ - att_idx)),
            axis=-1,
            # keepdims=True
        )
        attention_w = K.expand_dims(attention_w, -1)  # TODO remove and keepdims
        attention = K.sum(attention_w * attended, axis=1)

        return attention, kappa
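
A numeric sketch of the window computed above (toy sizes; the attended length and parameter split are assumptions): the K.constant index grid att_idx lets a Gaussian-mixture attention window be evaluated over all positions at once.

import numpy as np
from keras import backend as K

T = 6                                               # attended timesteps (assumed)
att_idx = K.constant(np.arange(T)[None, :, None])   # (1, T, 1) position index grid

alpha = K.constant([[1.0]])                         # one mixture component, one sample
beta = K.constant([[1.0]])
kappa = K.constant([[2.0]])                         # window centred on position 2

attention_w = K.sum(
    K.expand_dims(alpha, 1) * K.exp(-K.expand_dims(beta, 1) *
                                    K.square(K.expand_dims(kappa, 1) - att_idx)),
    axis=-1)
print(K.eval(attention_w))                          # (1, T) weights peaking at index 2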
Project: loss-correction    Author: giorgiop
def robust(name, P):

    if name == 'backward':
        P_inv = K.constant(np.linalg.inv(P))

        def loss(y_true, y_pred):
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
            return -K.sum(K.dot(y_true, P_inv) * K.log(y_pred), axis=-1)

    elif name == 'forward':
        P = K.constant(P)

        def loss(y_true, y_pred):
            y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
            y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
            return -K.sum(y_true * K.log(K.dot(y_pred, P)), axis=-1)

    return loss
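
A hedged usage sketch for the factory above (the 3-class noise-transition matrix is hypothetical): because P, or its inverse, is frozen into the closure with K.constant, the returned function is an ordinary Keras loss that can be evaluated directly or passed to model.compile.

import numpy as np
from keras import backend as K

P = np.array([[0.8, 0.1, 0.1],
              [0.1, 0.8, 0.1],
              [0.1, 0.1, 0.8]])                 # row-stochastic label-noise matrix (assumed)

forward_loss = robust('forward', P)

y_true = K.constant(np.eye(3))                  # one-hot labels
y_pred = K.constant(np.full((3, 3), 1. / 3))    # uniform predictions
print(K.eval(forward_loss(y_true, y_pred)))
# model.compile(optimizer='adam', loss=forward_loss)  # or use it in a real model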
Project: mimic3-benchmarks    Author: YerevaNN
def softmax(x, axis, mask=None):
    if mask is None:
        mask = K.constant(True)
    mask = K.cast(mask, K.floatx())
    if K.ndim(x) == K.ndim(mask) + 1:
        mask = K.expand_dims(mask)

    m = K.max(x, axis=axis, keepdims=True)
    e = K.exp(x - m) * mask
    s = K.sum(e, axis=axis, keepdims=True)
    s += K.cast(K.cast(s < K.epsilon(), K.floatx()) * K.epsilon(), K.floatx())
    return e / s
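
A quick evaluation of the masked softmax above on constant inputs (toy values): positions whose mask entry is 0 get zero probability and the remaining entries are renormalized.

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1.0, 2.0, 3.0],
                         [1.0, 1.0, 1.0]]))
mask = K.constant(np.array([[1.0, 1.0, 0.0],
                            [1.0, 1.0, 1.0]]))

print(K.eval(softmax(x, axis=-1, mask=mask)))
# row 0 renormalizes over its first two positions; row 1 stays uniform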
Project: deeppavlov    Author: deepmipt
def weighted_with_attention(self, inputs):
        """Define a function for a lambda layer of a model."""

        inp, inp_cont = inputs
        val = np.eye(self.max_sequence_length)
        kcon = K.constant(value=val, dtype='float32')
        diag = K.repeat_elements(inp_cont, self.max_sequence_length, 2) * kcon
        return K.batch_dot(diag, K.permute_dimensions(inp, (0,2,1)), axes=[1,2])
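
Here the constant identity matrix turns a per-timestep weight vector into a batch of diagonal matrices. A standalone sketch of that step (toy sizes):

import numpy as np
from keras import backend as K

T = 4
inp_cont = K.constant(np.array([[[0.1], [0.2], [0.3], [0.4]]]))  # (1, T, 1) weights
kcon = K.constant(value=np.eye(T), dtype='float32')

diag = K.repeat_elements(inp_cont, T, 2) * kcon   # (1, T, T) diagonal weight matrix
print(K.eval(diag)[0])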
Project: deeppavlov    Author: deepmipt
def create_full_matching_layer_b(self, input_dim_a, input_dim_b):
        """Create a full-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        val = np.concatenate((np.ones((1, 1)), np.zeros((self.max_sequence_length - 1, 1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0, 2, 1)))(inp_b_perm)
        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_last = Lambda(lambda x: x * W[i])(last_state)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_last = Lambda(lambda x: K.l2_normalize(x, -1))(outp_last)
            outp_last = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_last)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last, outp_a])
            outp = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_maxatt_matching_layer(self, input_dim_a, input_dim_b):
        """Create a max-attentive-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))

        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(inp_a)
        outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(inp_b)
        outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
        alpha = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
        alpha = Lambda(lambda x: K.one_hot(K.argmax(x, 1), self.max_sequence_length))(alpha)
        hmax = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([alpha, outp_b])

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_hmax = Lambda(lambda x: x * W[i])(hmax)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_hmax = Lambda(lambda x: K.l2_normalize(x, -1))(outp_hmax)
            outp_hmax = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_hmax)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_hmax, outp_a])
            val = np.eye(self.max_sequence_length)
            kcon = K.constant(value=val, dtype='float32')
            outp = Lambda(lambda x: K.sum(x * kcon, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
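
The identity constant serves a second purpose in the matching layers: multiplying a batch of square score matrices by np.eye(...) and summing over the last axis extracts each matrix's diagonal. A minimal check (toy sizes):

import numpy as np
from keras import backend as K

T = 4
scores = K.constant(np.random.rand(2, T, T))       # a batch of square score matrices
kcon = K.constant(value=np.eye(T), dtype='float32')

diag = K.sum(scores * kcon, -1, keepdims=True)     # (2, T, 1) per-row diagonal entries
print(K.eval(diag)[0, :, 0])
print(np.diagonal(K.eval(scores), axis1=1, axis2=2)[0])   # same values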
Project: deeppavlov    Author: deepmipt
def terminal_f(self, inp):
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp)
        return K.squeeze(last_state, 1)
Project: deeppavlov    Author: deepmipt
def terminal_b(self, inp):
        val = np.concatenate((np.ones((1,1)), np.zeros((self.max_sequence_length-1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp)
        return K.squeeze(last_state, 1)
Project: keras-contrib    Author: farizrahman4u
def test_DSSIM_channels_last():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
Project: keras-contrib    Author: farizrahman4u
def test_DSSIM_channels_first():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_first')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [3, input_dim, input_dim]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
Project: pic2vec    Author: datarobot
def test_splice_layer():
    """Test method splices tensors correctly"""
    # Create spliced and added layers via splicing function
    list_of_spliced_layers = _splice_layer(SPLICING_TENSOR, 3)
    # Add each of the layers together
    x = add(list_of_spliced_layers)
    # Create the spliced and added layers by hand
    check_layer = K.constant(9, shape=(3, 4))
    # Check the math
    assert np.allclose(K.eval(check_layer), K.eval(x), atol=ATOL)
Project: pic2vec    Author: datarobot
def test_find_pooling_constant():
    """Test that pooling constant given correct answer with good inputs"""
    assert _find_pooling_constant(POOLING_FEATURES, 6) == 10
Project: axolotl    Author: tomasreimers
def in_distance(y_true, y_pred):
    y_error = y_true - y_pred
    y_error_normalized = (y_error) / 2 # the width is currently 2 (as the coordinates are [-1, 1])
    y_scaled_error = K.dot(y_error_normalized, K.constant(np.array([[IPHONE_WIDTH, 0], [0, IPHONE_HEIGHT]])))
    y_distance_sq = K.sum(K.square(y_scaled_error), axis=-1)
    y_distance = K.sqrt(y_distance_sq)
    return y_distance
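
A standalone sketch of the scaling step above (WIDTH and HEIGHT are hypothetical stand-ins for IPHONE_WIDTH and IPHONE_HEIGHT): K.dot with a constant diagonal matrix rescales each coordinate of the normalized error before the Euclidean distance is taken.

import numpy as np
from keras import backend as K

WIDTH, HEIGHT = 6.0, 10.0                               # screen size in cm (assumed)
scale = K.constant(np.array([[WIDTH, 0.0], [0.0, HEIGHT]]))

y_error_normalized = K.constant(np.array([[0.1, -0.2],
                                          [0.0, 0.5]]))  # normalized (x, y) errors
y_scaled_error = K.dot(y_error_normalized, scale)
y_distance = K.sqrt(K.sum(K.square(y_scaled_error), axis=-1))
print(K.eval(y_distance))                                # distances in screen units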
Project: cyclegan_keras    Author: shadySource
def discriminator_loss(y_true, y_pred):
    loss = mean_squared_error(y_true, y_pred)
    is_large = k.greater(loss, k.constant(_disc_train_thresh)) # threshold
    is_large = k.cast(is_large, k.floatx())
    return loss * is_large # binary threshold the loss to prevent overtraining the discriminator
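
The same pattern in isolation (the 0.1 threshold is a hypothetical stand-in for _disc_train_thresh): comparing a loss tensor against a K.constant threshold and casting the boolean result back to floats gives a 0/1 gate that zeroes out small losses.

import numpy as np
from keras import backend as k

loss = k.constant(np.array([0.05, 0.3, 0.8]))
is_large = k.greater(loss, k.constant(0.1))     # hypothetical threshold
is_large = k.cast(is_large, k.floatx())
print(k.eval(loss * is_large))                  # [0.  0.3 0.8]; small losses are zeroed out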
Project: R-NET-in-Keras    Author: YerevaNN
def softmax(x, axis, mask=None):
    if mask is None:
        mask = K.constant(True)
    mask = K.cast(mask, K.floatx())
    if K.ndim(x) == K.ndim(mask) + 1:
        mask = K.expand_dims(mask)

    m = K.max(x, axis=axis, keepdims=True)
    e = K.exp(x - m) * mask
    s = K.sum(e, axis=axis, keepdims=True)
    s += K.cast(K.cast(s < K.epsilon(), K.floatx()) * K.epsilon(), K.floatx())
    return e / s
Project: PhasedLSTM-Keras    Author: fferroni
def _timegate_init(shape, dtype=None):
        assert len(shape)==2
        return K.constant(np.vstack((np.random.uniform(10, 100, shape[1]),
                      np.random.uniform(0, 1000, shape[1]),
                      np.zeros(shape[1]) + 0.05)),
                      dtype=dtype)
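
More generally, any function of (shape, dtype) that returns K.constant(...) can be handed to Keras wherever an initializer is expected. A minimal sketch with a hypothetical Dense layer (not the PhasedLSTM timegate itself), assuming Keras 2.x:

import numpy as np
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model

def ramp_init(shape, dtype=None):
    # fill the new weight with a fixed ramp, just to show the mechanism
    return K.constant(np.linspace(-1, 1, np.prod(shape)).reshape(shape), dtype=dtype)

inp = Input(shape=(4,))
out = Dense(3, kernel_initializer=ramp_init, use_bias=False)(inp)
model = Model(inp, out)
print(model.get_weights()[0])   # the ramp, exactly as initialized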
Project: deeppavlov    Author: deepmipt
def create_att_matching_layer(self, input_dim_a, input_dim_b):
        """Create an attentive-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))

        w = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            w.append(wi)

        outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(inp_a)
        outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(inp_b)
        outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
        alpha = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
        alpha = Lambda(lambda x: K.l2_normalize(x, 1))(alpha)
        hmean = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([alpha, outp_b])

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * w[i])(inp_a)
            outp_hmean = Lambda(lambda x: x * w[i])(hmean)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_hmean = Lambda(lambda x: K.l2_normalize(x, -1))(outp_hmean)
            outp_hmean = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_hmean)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_hmean, outp_a])
            val = np.eye(self.max_sequence_length)
            kcon = K.constant(value=val, dtype='float32')
            outp = Lambda(lambda x: K.sum(x * kcon, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: kfs    Author: the-moliver
def build(self, input_shape):
        assert len(input_shape) == 5
        if self.data_format == 'channels_first':
            channel_axis = 2
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')

        self.delays = input_shape[1]
        input_dim = input_shape[channel_axis]
        spatial_kernel_shape = self.spatial_kernel_size + (input_dim, 2*self.filters_complex + 2*self.filters_simple)

        self.spatial_kernel = self.add_weight(spatial_kernel_shape,
                                              initializer=self.spatial_kernel_initializer,
                                              name='spatial_kernel',
                                              regularizer=self.spatial_kernel_regularizer,
                                              constraint=self.spatial_kernel_constraint)

        self.temporal_kernel = K.pattern_broadcast(self.add_weight((self.delays, 1, self.filters_temporal),
                                               initializer=self.temporal_kernel_initializer,
                                               name='temporal_kernel',
                                               regularizer=self.temporal_kernel_regularizer,
                                               constraint=self.temporal_kernel_constraint), [False, True, False])

        if self.use_bias:
            self.bias = self.add_weight((self.filters_complex + self.filters_simple,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.temporal_freqs = K.pattern_broadcast(self.add_weight((self.temporal_frequencies, self.temporal_frequencies_initial_max/self.temporal_frequencies_scaling),
                                              initializer=step_init,
                                              name='temporal_frequencies',
                                              regularizer=self.temporal_frequencies_regularizer,
                                              constraint=self.temporal_frequencies_constraint), [True, False, True])

        self.delays_pi = K.pattern_broadcast(K.constant(2 * np.pi * np.arange(0, 1 + 1. / (self.delays - 1), 1. / (self.delays - 1))[:self.delays][:, None, None], name='delays_pi'), [False, True, True])

        self.WT = K.zeros((4*self.delays, 3*self.filters_temporal*self.temporal_frequencies))

        # Set input spec.
        self.input_spec = InputSpec(ndim=5,
                                    axes={channel_axis: input_dim})
        self.built = True
Project: kfs    Author: the-moliver
def __init__(self, filters_simple,
                 filters_complex,
                 kernel_size,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 use_bias=True,
                 **kwargs):

        if padding not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2DEnergy_Scatter:', padding)
        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.kernel_size = kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
        self.data_format = data_format

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.UnitNormOrthogonal(filters_complex, singles=True)
        self.bias_constraint = constraints.get(bias_constraint)

        self.epsilon = K.constant(K.epsilon())

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=4)]
        super(Convolution2DEnergy_Scatter, self).__init__(**kwargs)
Project: LIE    Author: EmbraceLife
def get_middle_layer_output(stock_path):


    stock_features, stock_target = stock_csv_features_targets(stock_path)

    ##############################
    # when wp is needed
    ##############################
    # global wp  # needed to make the following line work
    # wp = wp.load_model("/Users/Natsume/Downloads/data_for_all/stocks/best_models_trained/model.30.best.bias_removed")
    # preds1 = wp.predict(stock_features) # normal final output range (0, 1)
    # plt.hist(preds1)
    # plt.show()


    # no need wp here
    model_path = "/Users/Natsume/Downloads/data_for_all/stocks/best_models_trained/model.30.best.bias_removed"
    # model_path = "/Users/Natsume/Desktop/best_model_in_training.h5"
    wp_best = load_model(model_path)
    preds2 = wp_best.predict(stock_features)

    # access the second-to-last layer's output in test mode; values range roughly (-23, 25)
    out_last_second = K.function([wp_best.input, K.learning_phase()], [wp_best.layers[-2].output])([stock_features, 0])[0]
    plt.hist(out_last_second)
    plt.show()

    # access the third-to-last (dense) layer's output in test mode; values range roughly (-2, 1.4)
    out_last_third_dense = K.function([wp_best.input, K.learning_phase()], [wp_best.layers[-3].output])([stock_features, 0])[0]

    # see distribution of out_last_third_dense
    plt.hist(out_last_third_dense)
    plt.show()

    # apply K.sigmoid to out_last_third_dense, and plot distribution
    dense_tensor = K.constant(out_last_third_dense)
    sigmoid_tensor = K.sigmoid(dense_tensor)
    import tensorflow as tf
    sess = tf.Session()
    dense_sigmoid_out = sess.run(sigmoid_tensor)
    plt.hist(dense_sigmoid_out)
    plt.show()
    # plot dense_sigmoid_out as lines
    plt.plot(dense_sigmoid_out)
    plt.show()



    pred_capital_pos = np.reshape(dense_sigmoid_out, (-1, 1))
    true_price_change = np.reshape(stock_target, (-1, 1))

    preds_target = np.concatenate((pred_capital_pos, true_price_change), axis=1)

    return preds_target
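
As an aside, the K.constant / K.sigmoid / tf.Session sequence above can be collapsed with K.eval, which runs the tensor in the backend's own session; a sketch on a stand-in array:

import numpy as np
from keras import backend as K

out_last_third_dense = np.random.randn(100, 1)   # stand-in for the layer output above

dense_sigmoid_out = K.eval(K.sigmoid(K.constant(out_last_third_dense)))
# identical to building a tf.Session and running the sigmoid tensor by hand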