Python keras.backend module: batch_dot() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.batch_dot().

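Before the project snippets, here is a minimal, self-contained sketch (not taken from any project below; all shapes are illustrative) of what keras.backend.batch_dot computes for 3-D inputs and how the axes argument selects the contracted dimensions:

import numpy as np
from keras import backend as K

a = K.constant(np.random.rand(8, 5, 3))   # (batch, 5, 3)
b = K.constant(np.random.rand(8, 3, 7))   # (batch, 3, 7)

# Contract axis 2 of `a` with axis 1 of `b`: one matrix product per sample.
c = K.batch_dot(a, b, axes=[2, 1])
print(K.int_shape(c))  # (8, 5, 7)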
Project: deeppavlov    Author: deepmipt
def create_attention_layer(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="attention_generator")
        return model
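A standalone shape check of the batch_dot(axes=[1, 2]) scoring step above (the same pattern appears in the forward and backward variants that follow); the batch size, sequence length and attention_dim used here are illustrative, not the project's defaults:

import numpy as np
from keras import backend as K

batch, len_a, attention_dim = 4, 10, 8
outp_last_perm = K.constant(np.random.rand(batch, attention_dim, 1))
outp_a = K.constant(np.random.rand(batch, len_a, attention_dim))

# Contract attention_dim of the (permuted) last state with attention_dim of
# every token of sequence a: one unnormalised attention score per token.
scores = K.batch_dot(outp_last_perm, outp_a, axes=[1, 2])
print(K.int_shape(scores))  # (4, 1, 10); softmax is then taken over the 10 tokens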
Project: deeppavlov    Author: deepmipt
def create_attention_layer_f(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.zeros((self.max_sequence_length-1,1)), np.ones((1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_forw")
        return model
Project: deeppavlov    Author: deepmipt
def create_attention_layer_b(self, input_dim_a, input_dim_b):
        """Create an attention layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        val = np.concatenate((np.ones((1,1)), np.zeros((self.max_sequence_length-1,1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0,2,1)))(inp_b_perm)
        ker_in = glorot_uniform(seed=self.seed)
        outp_a = Dense(self.attention_dim, input_shape=(input_dim_a, self.hidden_dim),
                       kernel_initializer=ker_in, activation='relu')(inp_a)
        outp_last = Dense(self.attention_dim, input_shape=(1, self.hidden_dim),
                          kernel_initializer=ker_in, activation='relu')(last_state)
        outp_last_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_last)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last_perm, outp_a])
        outp_norm = Activation('softmax')(outp)
        outp_norm_perm = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_norm)
        model = Model(inputs=[inp_a, inp_b], outputs=outp_norm_perm, name="att_generator_back")
        return model
Project: keras    Author: GeekLiB
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
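A standalone shape sketch (illustrative sizes) of the unshared-weights trick above: the leading "batch" axis of batch_dot is the output position, so every output position is multiplied by its own (feature_dim x nb_filter) weight matrix:

import numpy as np
from keras import backend as K

output_length, batch_size, feature_dim, nb_filter = 6, 2, 12, 8
x_aggregate = K.constant(np.random.rand(output_length, batch_size, feature_dim))
W = K.constant(np.random.rand(output_length, feature_dim, nb_filter))

out = K.batch_dot(x_aggregate, W)            # default axes contract feature_dim
out = K.permute_dimensions(out, (1, 0, 2))   # back to (batch, output_length, nb_filter)
print(K.int_shape(out))  # (2, 6, 8)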
Project: dense_tensor    Author: bstriner
def tensor_factorization_symmetric(q,
                                   alpha=1e-7,
                                   beta=1.0,
                                   tensor_initializer='uniform',
                                   tensor_regularizer=None,
                                   tensor_constraint=None):
    """
    :param q: rank of inner parameter
    :param alpha: scale of eye to add. 0=pos/neg semidefinite, >0=pos/neg definite
    :param beta: multiplier of tensor. 1=positive,-1=negative
    """

    def fun(layer, units, input_dim, name):
        Q = add_weight(layer=layer,
                       initializer=tensor_initializer,
                       regularizer=tensor_regularizer,
                       constraint=tensor_constraint,
                       shape=(units, q, input_dim),
                       name=name)  # shape: (units, q, input_dim)
        tmp = K.batch_dot(Q, Q, axes=[[1], [1]])  # (units, q, input_dim) x (units, q, input_dim) -> (units, input_dim, input_dim)
        V = beta * ((eye(input_dim, input_dim) * alpha) + tmp)  # one symmetric (input_dim, input_dim) matrix per unit
        return [Q], V

    return fun
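A small NumPy check (illustrative only, using einsum in place of the backend call) that the factorisation above yields one symmetric, positive-definite (input_dim x input_dim) matrix per unit when alpha > 0:

import numpy as np

units, q, input_dim, alpha, beta = 3, 4, 5, 1e-7, 1.0
Q = np.random.rand(units, q, input_dim)
tmp = np.einsum('pqm,pqn->pmn', Q, Q)      # analogue of K.batch_dot(Q, Q, axes=[[1], [1]])
V = beta * (alpha * np.eye(input_dim) + tmp)

print(np.allclose(V, np.transpose(V, (0, 2, 1))))          # True: symmetric per unit
print(all(np.linalg.eigvalsh(Vp).min() > 0 for Vp in V))   # True: positive definite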
Project: knowledgeflow    Author: 3rduncle
def semantic_matrix(argv):
    assert len(argv) == 2
    q = argv[0]
    a = argv[1]
    q_sqrt = K.sqrt((q ** 2).sum(axis=2, keepdims=True))
    a_sqrt = K.sqrt((a ** 2).sum(axis=2, keepdims=True))
    denominator = K.batch_dot(q_sqrt, K.permute_dimensions(a_sqrt, [0,2,1]))
    return K.batch_dot(q, K.permute_dimensions(a, [0,2,1])) / (denominator + SAFE_EPSILON)
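The function above is simply a batched cosine-similarity matrix between every question token and every answer token; an illustrative NumPy equivalent (SAFE_EPSILON stands in for the module-level constant):

import numpy as np

SAFE_EPSILON = 1e-8
q = np.random.rand(2, 7, 16)   # (batch, question_len, dim)
a = np.random.rand(2, 9, 16)   # (batch, answer_len, dim)

dots = np.einsum('bqd,bad->bqa', q, a)   # batch_dot(q, a permuted to (batch, dim, answer_len))
norms = np.linalg.norm(q, axis=2)[:, :, None] * np.linalg.norm(a, axis=2)[:, None, :]
sim = dots / (norms + SAFE_EPSILON)      # (2, 7, 9), each entry a cosine similarity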

# Notes on extracting values at batch indices (see
# https://groups.google.com/forum/#!topic/theano-users/7gUdN6E00Dc);
# argmax is taken along axis 2 - axis, and negative indices are clamped with
# T.set_subtensor(ib[(ib < 0).nonzero()], 0)
Project: keras-customized    Author: ambrite
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: ntm_keras    Author: flomlo
def _get_weight_vector(self, M, w_tm1, k, beta, g, s, gamma):
#        M = tf.Print(M, [M, w_tm1, k], message='get weights beg1: ')
#        M = tf.Print(M, [beta, g, s, gamma], message='get weights beg2: ')
        # Content addressing, see Chapter 3.3.1:
        num = beta * _cosine_distance(M, k)
        w_c  = K.softmax(num) # It turns out that equation (5) is just softmax.
        # Location addressing, see Chapter 3.3.2:
        # Equation 7:
        w_g = (g * w_c) + (1-g)*w_tm1
        # C_s is the circular convolution
        #C_w = K.sum((self.C[None, :, :, :] * w_g[:, None, None, :]),axis=3)
        # Equation 8:
        # TODO: Explain
        C_s = K.sum(K.repeat_elements(self.C[None, :, :, :], self.batch_size, axis=0) * s[:,:,None,None], axis=1)
        w_tilda = K.batch_dot(C_s, w_g)
        # Equation 9:
        w_out = _renorm(w_tilda ** gamma)

        return w_out
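A standalone sketch of the batch_dot step in Equation 8 above: C_s holds one circular-shift matrix per batch element, and batch_dot applies it to the gated weight vector (shapes are illustrative, not the NTM's real sizes):

import numpy as np
from keras import backend as K

batch, n = 2, 5
C_s = K.constant(np.stack([np.roll(np.eye(n), 1, axis=1)] * batch))  # (2, 5, 5) shift matrices
w_g = K.constant(np.random.rand(batch, n))                           # (2, 5) gated weights

w_tilda = K.batch_dot(C_s, w_g)   # (2, 5): each weight vector circularly shifted
print(K.int_shape(w_tilda))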
Project: QA-System    Author: soloice
def build_mdl(len_words, embed_dim, embeds, len_sent1, len_sent2):
    embeds.insert(0, np.zeros(embeds[0].shape, dtype='float32'))  # for padding

    input_q = Input(shape=(len_sent1,), dtype='int32')
    input_a = Input(shape=(len_sent2,), dtype='int32')
    embed = Embedding(mask_zero=True, input_dim=len_words+1, output_dim=embed_dim,
                      weights=[np.array(embeds)], dropout=0.2)
    x_q = embed(input_q)
    x_a = embed(input_a)
    rnn_q = LSTM(64, input_dim=embed_dim, return_sequences=False, input_length=len_sent1)(x_q)
    rnn_a = LSTM(64, input_dim=embed_dim, return_sequences=False, input_length=len_sent2)(x_a)
    dense_q = Dense(32)(rnn_q)
    dense_a = Dense(32)(rnn_a)

    def cosine(x):
        axis = len(x[0]._keras_shape) - 1
        dot = lambda a, b: K.batch_dot(a, b, axes=axis)
        return dot(x[0], x[1]) / K.sqrt(dot(x[0], x[0]) * dot(x[1], x[1]))

    # https://github.com/fchollet/keras/issues/2299
    cosine_sim = merge([dense_q, dense_a], mode=cosine, output_shape=(1,))
    model = Model(input=[input_q, input_a], output=[cosine_sim])
    model.compile(optimizer='rmsprop', loss='mse')
    return model
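A standalone sketch of the cosine merge mode above: with axes set to the last axis, batch_dot reduces each pair of 32-dimensional vectors to a single dot product per sample, so the merged output has shape (batch, 1). Values are illustrative:

import numpy as np
from keras import backend as K

dense_q = K.constant(np.random.rand(4, 32))
dense_a = K.constant(np.random.rand(4, 32))

dot = lambda a, b: K.batch_dot(a, b, axes=1)
cos = dot(dense_q, dense_a) / K.sqrt(dot(dense_q, dense_q) * dot(dense_a, dense_a))
print(K.int_shape(cos))  # (4, 1): one cosine similarity per sample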
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie
def call(self, x, mask=None):
        input_, flow_layer_ = x
        stride_row, stride_col = self.subsample
        shape = input_._keras_shape
        output_row = shape[1] - self.kernel_size + 1
        output_col = shape[2] - self.kernel_size + 1
        xs = []
        ws = []
        for i in range(output_row):
            for j in range(output_col):
                slice_row = slice(i * stride_row,
                                  i * stride_row + self.kernel_size)
                slice_col = slice(j * stride_col,
                                  j * stride_col + self.kernel_size)
                xs.append(K.reshape(input_[:, slice_row, slice_col, :],
                                    (1, -1, self.kernel_size ** 2, shape[-1])))
                ws.append(K.reshape(flow_layer_[:, i, j, :], (1, -1, self.kernel_size ** 2, 1)))
        x_aggregate = K.concatenate(xs, axis=0)
        x_aggregate = K.permute_dimensions(x_aggregate, (0, 1, 3, 2))
        W = K.concatenate(ws, axis=0)
        output = K.batch_dot(x_aggregate, W)
        output = K.reshape(output, (output_row, output_col, -1, shape[3]))
        output = K.permute_dimensions(output, (2, 0, 1, 3))
        output = self.activation(output)
        return output
Project: InnerOuterRNN    Author: Chemoinformatics
def call(self, x, mask=None):
        stride = self.subsample_length
        output_length, feature_dim, nb_filter = self.W_shape

        xs = []
        for i in range(output_length):
            slice_length = slice(i * stride, i * stride + self.filter_length)
            xs.append(K.reshape(x[:, slice_length, :], (1, -1, feature_dim)))
        x_aggregate = K.concatenate(xs, axis=0)
        # (output_length, batch_size, nb_filter)
        output = K.batch_dot(x_aggregate, self.W)
        output = K.permute_dimensions(output, (1, 0, 2))

        if self.bias:
            output += K.reshape(self.b, (1, output_length, nb_filter))

        output = self.activation(output)
        return output
Project: R-NET-in-Keras    Author: YerevaNN
def step(self, inputs, states):
        vP_t = inputs
        hP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks 
        vP, WP_v, WPP_v, v, W_g2 = states[3:8]
        vP_mask, = states[8:]

        WP_v_Dot = K.dot(vP, WP_v)
        WPP_v_Dot = K.dot(K.expand_dims(vP_t, axis=1), WPP_v)

        s_t_hat = K.tanh(WPP_v_Dot + WP_v_Dot)
        s_t = K.dot(s_t_hat, v)
        s_t = K.batch_flatten(s_t)

        a_t = softmax(s_t, mask=vP_mask, axis=1)

        c_t = K.batch_dot(a_t, vP, axes=[1, 1])

        GRU_inputs = K.concatenate([vP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g2))
        GRU_inputs = g * GRU_inputs

        hP_t, s = super(SelfAttnGRU, self).step(GRU_inputs, states)

        return hP_t, s
Project: R-NET-in-Keras    Author: YerevaNN
def step(self, inputs, states):
        # input
        ha_tm1 = states[0] # (B, 2H)
        _ = states[1:3] # ignore internal dropout/masks
        hP, WP_h, Wa_h, v = states[3:7] # (B, P, 2H)
        hP_mask, = states[7:8]

        WP_h_Dot = K.dot(hP, WP_h) # (B, P, H)
        Wa_h_Dot = K.dot(K.expand_dims(ha_tm1, axis=1), Wa_h) # (B, 1, H)

        s_t_hat = K.tanh(WP_h_Dot + Wa_h_Dot) # (B, P, H)
        s_t = K.dot(s_t_hat, v) # (B, P, 1)
        s_t = K.batch_flatten(s_t) # (B, P)
        a_t = softmax(s_t, mask=hP_mask, axis=1) # (B, P)
        c_t = K.batch_dot(hP, a_t, axes=[1, 1]) # (B, 2H)

        GRU_inputs = c_t
        ha_t, (ha_t_,) = super(PointerGRU, self).step(GRU_inputs, states)

        return a_t, [ha_t]
Project: R-NET-in-Keras    Author: YerevaNN
def call(self, inputs, mask=None):
        assert(isinstance(inputs, list) and len(inputs) == 5)
        uQ, WQ_u, WQ_v, v, VQ_r = inputs
        uQ_mask = mask[0] if mask is not None else None

        ones = K.ones_like(K.sum(uQ, axis=1, keepdims=True)) # (B, 1, 2H)
        s_hat = K.dot(uQ, WQ_u)
        s_hat += K.dot(ones, K.dot(WQ_v, VQ_r))
        s_hat = K.tanh(s_hat)
        s = K.dot(s_hat, v)
        s = K.batch_flatten(s)

        a = softmax(s, mask=uQ_mask, axis=1)

        rQ = K.batch_dot(uQ, a, axes=[1, 1])

        return rQ
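The rQ line above is the attention-pooling pattern shared by the R-NET snippets: batch_dot(values, weights, axes=[1, 1]) contracts the sequence axis, leaving one weighted-average vector per sample. A standalone shape check with illustrative sizes:

import numpy as np
from keras import backend as K

B, P, H2 = 3, 6, 4                                  # batch, sequence length, 2*hidden
uQ = K.constant(np.random.rand(B, P, H2))           # values
a = K.softmax(K.constant(np.random.rand(B, P)))     # attention weights over the P positions

rQ = K.batch_dot(uQ, a, axes=[1, 1])
print(K.int_shape(rQ))  # (3, 4): one pooled vector per sample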
Project: R-NET-in-Keras    Author: YerevaNN
def step(self, inputs, states):
        uP_t = inputs
        vP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks
        uQ, WQ_u, WP_v, WP_u, v, W_g1 = states[3:9]
        uQ_mask, = states[9:10]

        WQ_u_Dot = K.dot(uQ, WQ_u) #WQ_u
        WP_v_Dot = K.dot(K.expand_dims(vP_tm1, axis=1), WP_v) #WP_v
        WP_u_Dot = K.dot(K.expand_dims(uP_t, axis=1), WP_u) # WP_u

        s_t_hat = K.tanh(WQ_u_Dot + WP_v_Dot + WP_u_Dot)

        s_t = K.dot(s_t_hat, v) # v
        s_t = K.batch_flatten(s_t)
        a_t = softmax(s_t, mask=uQ_mask, axis=1)
        c_t = K.batch_dot(a_t, uQ, axes=[1, 1])

        GRU_inputs = K.concatenate([uP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g1))  # W_g1
        GRU_inputs = g * GRU_inputs
        vP_t, s = super(QuestionAttnGRU, self).step(GRU_inputs, states)

        return vP_t, s
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def bd(inputs):
    x,y = inputs
    result = K.batch_dot(x,y,axes=[1,1])
    return result
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def bd2(inputs):
    x,y = inputs
    result = K.batch_dot(x,y,axes=[2,1])
    return result
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def bd3(inputs):
    x,y = inputs
    result = K.batch_dot(x,y,axes=[1,2])
    return result
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu
def bd4(inputs):
    x,y = inputs
    result = K.batch_dot(x,y,axes=[2,2])
    return result
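The bd, bd2, bd3 and bd4 helpers above differ only in which axes of the two 3-D inputs batch_dot contracts; a standalone shape check with illustrative sizes makes the difference concrete:

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(2, 3, 4))   # (batch, 3, 4)
y1 = K.constant(np.random.rand(2, 3, 5))
y2 = K.constant(np.random.rand(2, 4, 5))
y3 = K.constant(np.random.rand(2, 5, 3))
y4 = K.constant(np.random.rand(2, 5, 4))

print(K.int_shape(K.batch_dot(x, y1, axes=[1, 1])))  # bd:  contract both axis-1 dims -> (2, 4, 5)
print(K.int_shape(K.batch_dot(x, y2, axes=[2, 1])))  # bd2: plain batched matmul      -> (2, 3, 5)
print(K.int_shape(K.batch_dot(x, y3, axes=[1, 2])))  # bd3:                              (2, 4, 5)
print(K.int_shape(K.batch_dot(x, y4, axes=[2, 2])))  # bd4:                              (2, 3, 5)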
Project: siamese_sentiment    Author: jcavalieri8619
def euclidDist(inputs):
    assert len(inputs) == 2, "euclidDist requires 2 inputs"
    l1 = inputs[0]
    l2 = inputs[1]
    x = l1 - l2
    output = K.batch_dot(x, x, axes=1)
    K.reshape(output, (1,))
    return output
Project: siamese_sentiment    Author: jcavalieri8619
def squaredl2(X):
    output = K.batch_dot(X, X, axes=1)
    K.reshape(output, (1,))
    return output
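An illustrative check that batch_dot(x, x, axes=1) on 2-D inputs gives the squared L2 norm of each row, which is the squared Euclidean distance when x is the difference of two batches of vectors:

import numpy as np
from keras import backend as K

l1 = np.random.rand(4, 10).astype('float32')
l2 = np.random.rand(4, 10).astype('float32')
x = l1 - l2

d = K.eval(K.batch_dot(K.constant(x), K.constant(x), axes=1))
print(d.shape)                                      # (4, 1)
print(np.allclose(d[:, 0], np.sum(x ** 2, axis=1)))  # True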
Project: deeppavlov    Author: deepmipt
def weighted_with_attention(self, inputs):
        """Define a function for a lambda layer of a model."""

        inp, inp_cont = inputs
        val = np.eye(self.max_sequence_length)
        kcon = K.constant(value=val, dtype='float32')
        diag = K.repeat_elements(inp_cont, self.max_sequence_length, 2) * kcon
        return K.batch_dot(diag, K.permute_dimensions(inp, (0,2,1)), axes=[1,2])
Project: deeppavlov    Author: deepmipt
def weight_and_reduce(self, inputs):
        """Define a function for a lambda layer of a model."""

        inp, inp_cont = inputs
        reduced = K.batch_dot(inp_cont,
                              K.permute_dimensions(inp, (0,2,1)), axes=[1,2])
        return K.squeeze(reduced, 1)
Project: deeppavlov    Author: deepmipt
def create_full_matching_layer_b(self, input_dim_a, input_dim_b):
        """Create a full-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        val = np.concatenate((np.ones((1, 1)), np.zeros((self.max_sequence_length - 1, 1))), axis=0)
        kcon = K.constant(value=val, dtype='float32')
        inp_b_perm = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(inp_b)
        last_state = Lambda(lambda x: K.permute_dimensions(K.dot(x, kcon), (0, 2, 1)))(inp_b_perm)
        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_last = Lambda(lambda x: x * W[i])(last_state)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_last = Lambda(lambda x: K.l2_normalize(x, -1))(outp_last)
            outp_last = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_last)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_last, outp_a])
            outp = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_maxpool_matching_layer(self, input_dim_a, input_dim_b):
        """Create a maxpooling-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))
        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_b = Lambda(lambda x: x * W[i])(inp_b)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(outp_b)
            outp_b = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp_b)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
            outp = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(outp)
            outp = Lambda(lambda x: K.max(x, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: deeppavlov    Author: deepmipt
def create_maxatt_matching_layer(self, input_dim_a, input_dim_b):
        """Create a max-attentive-matching layer of a model."""

        inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
        inp_b = Input(shape=(input_dim_b, self.hidden_dim,))

        W = []
        for i in range(self.perspective_num):
            wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                           seed=self.seed if self.seed is not None else 243)
            W.append(wi)

        outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(inp_a)
        outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(inp_b)
        outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
        alpha = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
        alpha = Lambda(lambda x: K.one_hot(K.argmax(x, 1), self.max_sequence_length))(alpha)
        hmax = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([alpha, outp_b])

        m = []
        for i in range(self.perspective_num):
            outp_a = Lambda(lambda x: x * W[i])(inp_a)
            outp_hmax = Lambda(lambda x: x * W[i])(hmax)
            outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
            outp_hmax = Lambda(lambda x: K.l2_normalize(x, -1))(outp_hmax)
            outp_hmax = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_hmax)
            outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_hmax, outp_a])
            val = np.eye(self.max_sequence_length)
            kcon = K.constant(value=val, dtype='float32')
            outp = Lambda(lambda x: K.sum(x * kcon, -1, keepdims=True))(outp)
            m.append(outp)
        if self.perspective_num > 1:
            persp = Lambda(lambda x: K.concatenate(x, 2))(m)
        else:
            persp = m
        model = Model(inputs=[inp_a, inp_b], outputs=persp)
        return model
Project: dense_tensor    Author: bstriner
def tensor_factorization_low_rank(q,
                                  tensor_initializer='uniform',
                                  tensor_regularizer=None,
                                  tensor_constraint=None):
    def fun(layer, units, input_dim, name):
        qs = [add_weight(layer=layer,
                         initializer=tensor_initializer,
                         regularizer=tensor_regularizer,
                         constraint=tensor_constraint,
                         shape=(units, q, input_dim),
                         name="{}_Q{}".format(name, i)) for i in range(2)]
        V = K.batch_dot(qs[0], qs[1], axes=[[1], [1]])  # p,m,q + p,q,m = p,m,m
        return qs, V

    return fun