Python theano.tensor module: itensor3() code examples

The following 17 code examples, extracted from open-source Python projects, show how to use theano.tensor.itensor3().
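Before the project examples, here is a minimal sketch of the pattern they all share: itensor3() returns a symbolic 3-D tensor of dtype int32, typically used as a placeholder for batched integer index data. The variable names below are illustrative, not taken from any of the projects.

import numpy as np
import theano
import theano.tensor as T

# Symbolic 3-D int32 tensor, e.g. (batch, sentence, word) indices.
idx = T.itensor3('idx')

# A toy graph that sums over the innermost axis.
total = idx.sum(axis=-1)
f = theano.function([idx], total)

batch = np.arange(24, dtype='int32').reshape((2, 3, 4))
print(f(batch).shape)  # (2, 3)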

Project: mimicry.ai    Author: fizerkhan    | project source | file source
def symbolic_input_variables(self):
        features = tensor.tensor3('features')
        features_mask = tensor.matrix('features_mask')
        labels = tensor.imatrix('labels')
        labels_mask = tensor.matrix('labels_mask')

        start_flag = tensor.scalar('start_flag')

        if self.use_speaker:
            speaker = tensor.imatrix('speaker_index')
        else:
            speaker = None

        if self.raw_output:
            raw_sequence = tensor.itensor3('raw_audio')
        else:
            raw_sequence = None

        return features, features_mask, labels, labels_mask, \
            speaker, start_flag, raw_sequence
Project: recursive_WSABIE    Author: ktsaurabh    | project source | file source
def __init__(self, input_dim, proj_dim=128, neg_samples = 4,
                 init='uniform', activation='sigmoid', weights=None, W_regularizer = None, activity_regularizer = None, **kwargs):

        super(WordTagContextProduct, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.proj_dim = proj_dim
        self.samples = neg_samples + 1
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input = [T.itensor3(), T.itensor3()]
        self.W_w = self.init((input_dim, proj_dim))
        self.params = [self.W_w]
        if weights is not None:
            self.set_weights(weights)
Project: recursive_WSABIE    Author: ktsaurabh    | project source | file source
def __init__(self, input_dim, proj_dim=128, neg_samples = 4,tensor_slices = 4, slice_dim = 16,
                 init='uniform', activation='tanh', weights=None,W_regularizer = None, activity_regularizer=None,   **kwargs):

        super(WordTagContextProduct_tensor, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.proj_dim = proj_dim
        self.samples = neg_samples + 1
        #np.random.seed(0)
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input = [T.itensor3(), T.itensor3()]
        self.W_w = self.init((input_dim, proj_dim))
        self.tensor_slices = tensor_slices
        self.slice_dim = slice_dim
        self.params = [self.W_w]

        if weights is not None:
            self.set_weights(weights)
Project: sampleRNN_ICLR2017    Author: soroushmehr    | project source | file source
def T_one_hot(inp_tensor, n_classes):
    """
    :todo:
        - Implement other methods from here: 
        - Compare them speed-wise for different sizes
        - Implement N_one_hot for Numpy version, with speed tests.

    Theano one-hot (1-of-k) from an input tensor of indices.
    If the indices are of the shape (a0, a1, ..., an) the output
    shape would be (a0, a1, ..., an, n_classes).

    :params:
        - inp_tensor: any theano tensor with dtype int* holding indices, all
                      in the range [0, n_classes-1].
        - n_classes: number of classes which determines the output size.

    :usage:
        >>> idx = T.itensor3()
        >>> idx_val = numpy.array([[[0,1,2,3],[4,5,6,7]]], dtype='int32')
        >>> one_hot = T_one_hot(idx, 8)
        >>> out = one_hot.eval({idx: idx_val})
        >>> print out
        array([[[[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.]],
        [[ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.]]]])
        >>> print idx_val.shape, out.shape
        (1, 2, 4) (1, 2, 4, 8)
    """
    flattened = inp_tensor.flatten()
    z = T.zeros((flattened.shape[0], n_classes), dtype=theano.config.floatX)
    one_hot = T.set_subtensor(z[T.arange(flattened.shape[0]), flattened], 1)
    out_shape = [inp_tensor.shape[i] for i in xrange(inp_tensor.ndim)] + [n_classes]
    one_hot = one_hot.reshape(out_shape)
    return one_hot
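The todo above mentions an N_one_hot NumPy counterpart. As a sketch of the same flatten / scatter / reshape approach in NumPy (the name and signature are only what the todo suggests, not code from the project):

import numpy as np

def N_one_hot(inp, n_classes):
    # NumPy analogue of T_one_hot: flatten, scatter ones, reshape back.
    flat = inp.flatten()
    z = np.zeros((flat.shape[0], n_classes), dtype='float32')
    z[np.arange(flat.shape[0]), flat] = 1.
    return z.reshape(inp.shape + (n_classes,))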
Project: CopyNet    Author: MultiPath    | project source | file source
def ndim_itensor(ndim, name=None):
    if ndim == 2:
        return T.imatrix(name)
    elif ndim == 3:
        return T.itensor3(name)
    elif ndim == 4:
        return T.itensor4(name)
    return T.imatrix(name)
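As a quick usage note, the helper simply dispatches on ndim to the matching int32 constructor (imatrix / itensor3 / itensor4), falling back to imatrix for any other value. A minimal check, assuming ndim_itensor above and its theano.tensor import are in scope:

x3 = ndim_itensor(3, 'x3')
print(x3.ndim, x3.dtype)  # 3 int32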


Project: seq2seq-keyphrase    Author: memray    | project source | file source
def ndim_itensor(ndim, name=None):
    if ndim == 2:
        return T.imatrix(name)
    elif ndim == 3:
        return T.itensor3(name)
    elif ndim == 4:
        return T.itensor4(name)
    return T.imatrix(name)


Project: NCRF-AE    Author: cosmozhang    | project source | file source
def __init__(self, rng, embeddings, char_embeddings, hiddensize, char_hiddensize, embedding_dim, char_embedding_dim, window_size, num_tags, dic_size, dropout_rate = 0.7):
        self.rng = rng
        self.inputX = T.imatrix('inputX') # a sentence, shape (T, window_size)
        self.inputX_chars = T.itensor3('inputX_chars') # a sentence's characters, shape (T, window_size, max number of chars in a word)
        self.inputY = T.ivector('inputY') # tags of a sentence
        self.is_train = T.iscalar('is_train')

        self.new_theta = T.fmatrix('new_theta')

        self.dropout_rate = dropout_rate
        self.nhidden = hiddensize
        self.char_nhidden = char_hiddensize # for now set the number of hidden units the same
        self.embedding_dim = embedding_dim
        self.char_embedding_dim = char_embedding_dim
        self.window_size = window_size
        self.n_classes = num_tags
        self.dic_size = dic_size

        # test values for debugging during compilation
        self.inputX.tag.test_value = np.ones((10, window_size)).astype(np.int32)
        self.inputX_chars.tag.test_value = np.ones((10, window_size, 8)).astype(np.int32)
        self.inputY.tag.test_value = np.ones(10).astype(np.int32)

        self.Embeddings = theano.shared(value = embeddings, name = "Embeddings", borrow = True)
        self.Char_Embeddings = theano.shared(value = char_embeddings, name = "Char_Embeddings", borrow = True)

        # word embeddings
        self.inputW = self.Embeddings[self.inputX]

        # char embeddings
        self.inputC = self.Char_Embeddings[self.inputX_chars].dimshuffle([2, 0, 1, 3])

        self.params = [self.Embeddings, self.Char_Embeddings]
Project: senti    Author: stevenxxiu    | project source | file source
def __init__(self, batch_size, emb_X, num_words, lstm_params, conv_param, output_size, f1_classes):
        super().__init__(batch_size)
        self.num_words = num_words
        self.inputs = [T.itensor3('input'), T.tensor3('mask')]
        self.target = T.ivector('target')
        l = InputLayer((batch_size, num_words, None), self.inputs[0])
        l_mask = InputLayer((batch_size, num_words, None), self.inputs[1])
        l = ReshapeLayer(l, (-1, [2]))
        l_mask = ReshapeLayer(l_mask, (-1, [2]))
        l = EmbeddingLayer(l, emb_X.shape[0], emb_X.shape[1], W=emb_X)
        for lstm_param in lstm_params:
            l = LSTMLayer(
                l, lstm_param, grad_clipping=100, nonlinearity=tanh, mask_input=l_mask, only_return_final=True
            )
        l = ReshapeLayer(l, (batch_size, num_words, -1))
        l_convs = []
        for filter_size in conv_param[1]:
            l_cur = Conv1DLayer(l, conv_param[0], filter_size, pad='full', nonlinearity=rectify)
            l_cur = MaxPool1DLayer(l_cur, num_words + filter_size - 1, ignore_border=True)
            l_cur = FlattenLayer(l_cur)
            l_convs.append(l_cur)
        l = ConcatLayer(l_convs)
        l = DropoutLayer(l)
        l = DenseLayer(l, output_size, nonlinearity=log_softmax)
        self.constraints[l.W] = lambda u, v: norm_constraint(v, 3)
        self.pred = T.exp(get_output(l, deterministic=True))
        self.loss = T.mean(categorical_crossentropy_exp(self.target, get_output(l)))
        params = get_all_params(l, trainable=True)
        self.updates = adadelta(self.loss, params)
        self.metrics = {'train': [acc], 'val': [acc, f1(f1_classes)]}
        self.network = l
        self.compile()
Project: senti    Author: stevenxxiu    | project source | file source
def test_get_output_for(self):
        X = T.itensor3()
        X1 = np.empty((2, 2, 10), dtype='int32')
        for i, is_ in enumerate(itertools.product(*(range(n) for n in X1.shape[:-1]))):
            X1[is_] = np.arange(i, 10 + i)
        X2 = np.empty((2, 2, 3), dtype='int32')
        for i, is_ in enumerate(itertools.product(*(range(n) for n in X2.shape[:-1]))):
            X2[is_] = np.arange(7 + i, 10 + i)
        self.assertTrue(np.array_equal(
            theano.function([X], KMaxPool1DLayer(InputLayer((100, 100)), 3).get_output_for(X))(X1), X2
        ))
Project: mimicry.ai    Author: fizerkhan    | project source | file source
def T_one_hot(inp_tensor, n_classes):
    """
    :todo:
        - Implement other methods from here:
        - Compare them speed-wise for different sizes
        - Implement N_one_hot for Numpy version, with speed tests.

    Theano one-hot (1-of-k) from an input tensor of indices.
    If the indices are of the shape (a0, a1, ..., an) the output
    shape would be (a0, a1, ..., an, n_classes).

    :params:
        - inp_tensor: any theano tensor with dtype int* holding indices, all
                      in the range [0, n_classes-1].
        - n_classes: number of classes which determines the output size.

    :usage:
        >>> idx = T.itensor3()
        >>> idx_val = numpy.array([[[0,1,2,3],[4,5,6,7]]], dtype='int32')
        >>> one_hot = T_one_hot(idx, 8)
        >>> out = one_hot.eval({idx: idx_val})
        >>> print out
        array([[[[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.]],
        [[ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.]]]])
        >>> print idx_val.shape, out.shape
        (1, 2, 4) (1, 2, 4, 8)
    """
    flattened = inp_tensor.flatten()
    z = T.zeros((flattened.shape[0], n_classes), dtype=theano.config.floatX)
    one_hot = T.set_subtensor(z[T.arange(flattened.shape[0]), flattened], 1)
    out_shape = [inp_tensor.shape[i] for i in xrange(inp_tensor.ndim)] + [n_classes]
    one_hot = one_hot.reshape(out_shape)
    return one_hot
Project: lmkit    Author: jiangnanhugo    | project source | file source
def __init__(self,n_input, n_hidden, n_output,
                 cell='gru', optimizer='sgd',p=0.1,
                 q_w=None,k=10,sampling='nce'):
        self.x = T.imatrix('batched_sequence_x')  # n_batch, maxlen
        self.x_mask = T.fmatrix('x_mask')
        self.y = T.imatrix('batched_sequence_y')
        self.y_mask = T.fmatrix('y_mask')
        # negy: negative samples for NCE, shape (len(y), k)
        self.negy = T.itensor3('negy')

        self.n_input=n_input
        self.n_hidden=n_hidden
        self.n_output=n_output

        self.k=k
        self.sampling=sampling
        self.optimizer=optimizer
        self.cell=cell
        self.p=p
        self.is_train = T.iscalar('is_train')
        self.rng = RandomStreams(1234)


        init_Embd = np.asarray(
            np.random.uniform(low=-np.sqrt(1. / n_output), high=np.sqrt(1. / n_output), size=(n_output, n_input)),
            dtype=theano.config.floatX)

        self.E = theano.shared(value=init_Embd, name='word_embedding', borrow=True)

        self.q_w = theano.shared(value=q_w, name='vocabulary distribution', borrow=True)

        self.build()
Project: CopyNet    Author: MingyuanXie    | project source | file source
def ndim_itensor(ndim, name=None):
    if ndim == 2:
        return T.imatrix(name)
    elif ndim == 3:
        return T.itensor3(name)
    elif ndim == 4:
        return T.itensor4(name)
    return T.imatrix(name)


Project: WebNav    Author: nyu-dl    | project source | file source
def make_node(self, x, x2, x3, x4, x5):
        # check that the theano version has support for __props__.
        # This next line looks like it has a typo,
        # but it's actually a way to detect the theano version
        # is sufficiently recent to support the use of __props__.
        assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
        x = tensor.as_tensor_variable(x)
        x2 = tensor.as_tensor_variable(x2)
        x3 = tensor.as_tensor_variable(x3)
        x4 = tensor.as_tensor_variable(x4)
        x5 = tensor.as_tensor_variable(x5)

        if prm.att_doc:
            if prm.compute_emb:
                td = tensor.itensor4().type()
            else:
                td = tensor.ftensor4().type()
            tm = tensor.ftensor3().type()
        else:
            if prm.compute_emb:
                td = tensor.itensor3().type()
            else:
                td = tensor.ftensor3().type()
            tm = tensor.fmatrix().type()
        return theano.Apply(self, [x,x2,x3,x4,x5], [td, tm, \
                                           tensor.fmatrix().type(), tensor.ivector().type()])
Project: QueryReformulator    Author: nyu-dl    | project source | file source
def make_node(self, x1, x2, x3, x4):
        assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
        x1 = tensor.as_tensor_variable(x1)
        x2 = tensor.as_tensor_variable(x2)
        x3 = tensor.as_tensor_variable(x3)
        x4 = tensor.as_tensor_variable(x4)
        out = [tensor.fmatrix().type(), tensor.itensor3().type(), tensor.imatrix().type(), tensor.fmatrix().type()]

        return theano.Apply(self, [x1, x2, x3, x4], out)
Project: ga-reader    Author: bdhingra    | project source | file source
def __init__(self, K, vocab_size, num_chars, W_init, 
            nhidden, embed_dim, dropout, train_emb, char_dim, use_feat, gating_fn, 
            save_attn=False):
        self.nhidden = nhidden
        self.embed_dim = embed_dim
        self.dropout = dropout
        self.train_emb = train_emb
        self.char_dim = char_dim
        self.learning_rate = LEARNING_RATE
        self.num_chars = num_chars
        self.use_feat = use_feat
        self.save_attn = save_attn
        self.gating_fn = gating_fn

        self.use_chars = self.char_dim!=0
        if W_init is None: W_init = lasagne.init.GlorotNormal().sample((vocab_size, self.embed_dim))

        doc_var, query_var, cand_var = T.itensor3('doc'), T.itensor3('quer'), \
                T.wtensor3('cand')
        docmask_var, qmask_var, candmask_var = T.bmatrix('doc_mask'), T.bmatrix('q_mask'), \
                T.bmatrix('c_mask')
        target_var = T.ivector('ans')
        feat_var = T.imatrix('feat')
        doc_toks, qry_toks= T.imatrix('dchars'), T.imatrix('qchars')
        tok_var, tok_mask = T.imatrix('tok'), T.bmatrix('tok_mask')
        cloze_var = T.ivector('cloze')
        self.inps = [doc_var, doc_toks, query_var, qry_toks, cand_var, target_var, docmask_var,
                qmask_var, tok_var, tok_mask, candmask_var, feat_var, cloze_var]

        self.predicted_probs, predicted_probs_val, self.network, W_emb, attentions = (
                self.build_network(K, vocab_size, W_init))

        self.loss_fn = T.nnet.categorical_crossentropy(self.predicted_probs, target_var).mean()
        self.eval_fn = lasagne.objectives.categorical_accuracy(self.predicted_probs, 
                target_var).mean()

        loss_fn_val = T.nnet.categorical_crossentropy(predicted_probs_val, target_var).mean()
        eval_fn_val = lasagne.objectives.categorical_accuracy(predicted_probs_val, 
                target_var).mean()

        self.params = L.get_all_params(self.network, trainable=True)

        updates = lasagne.updates.adam(self.loss_fn, self.params, learning_rate=self.learning_rate)

        self.train_fn = theano.function(self.inps,
                [self.loss_fn, self.eval_fn, self.predicted_probs], 
                updates=updates,
                on_unused_input='warn')
        self.validate_fn = theano.function(self.inps, 
                [loss_fn_val, eval_fn_val, predicted_probs_val]+attentions,
                on_unused_input='warn')
Project: fg-gating    Author: kimiyoung    | project source | file source
def __init__(self, K, vocab_size, num_chars, W_init, regularizer, rlambda, 
            nhidden, embed_dim, dropout, train_emb, subsample, char_dim, use_feat, feat_cnt,
            save_attn=False):
        self.nhidden = nhidden
        self.embed_dim = embed_dim
        self.dropout = dropout
        self.train_emb = train_emb
        self.subsample = subsample
        self.char_dim = char_dim
        self.learning_rate = LEARNING_RATE
        self.num_chars = num_chars
        self.use_feat = use_feat
        self.feat_cnt = feat_cnt
        self.save_attn = save_attn

        norm = lasagne.regularization.l2 if regularizer=='l2' else lasagne.regularization.l1
        self.use_chars = self.char_dim!=0
        if W_init is None: W_init = lasagne.init.GlorotNormal().sample((vocab_size, self.embed_dim))

        doc_var, query_var, cand_var = T.itensor3('doc'), T.itensor3('quer'), \
                T.wtensor3('cand')
        docmask_var, qmask_var, candmask_var = T.bmatrix('doc_mask'), T.bmatrix('q_mask'), \
                T.bmatrix('c_mask')
        target_var = T.ivector('ans')
        feat_var = T.imatrix('feat')
        doc_toks, qry_toks= T.imatrix('dchars'), T.imatrix('qchars')
        tok_var, tok_mask = T.imatrix('tok'), T.bmatrix('tok_mask')
        cloze_var = T.ivector('cloze')
        match_feat_var = T.itensor3('match_feat')
        use_char_var = T.tensor3('use_char')
        use_char_q_var = T.tensor3('use_char_q')
        self.inps = [doc_var, doc_toks, query_var, qry_toks, cand_var, target_var, docmask_var,
                qmask_var, tok_var, tok_mask, candmask_var, feat_var, cloze_var, match_feat_var, use_char_var, use_char_q_var]

        if rlambda > 0.: W_pert = W_init + lasagne.init.GlorotNormal().sample(W_init.shape)
        else: W_pert = W_init
        self.predicted_probs, predicted_probs_val, self.network, W_emb, attentions = (
                self.build_network(K, vocab_size, W_pert))

        self.loss_fn = T.nnet.categorical_crossentropy(self.predicted_probs, target_var).mean() + \
                rlambda*norm(W_emb-W_init)
        self.eval_fn = lasagne.objectives.categorical_accuracy(self.predicted_probs, 
                target_var).mean()

        loss_fn_val = T.nnet.categorical_crossentropy(predicted_probs_val, target_var).mean() + \
                rlambda*norm(W_emb-W_init)
        eval_fn_val = lasagne.objectives.categorical_accuracy(predicted_probs_val, 
                target_var).mean()

        self.params = L.get_all_params(self.network, trainable=True)

        updates = lasagne.updates.adam(self.loss_fn, self.params, learning_rate=self.learning_rate)

        self.train_fn = theano.function(self.inps,
                [self.loss_fn, self.eval_fn, self.predicted_probs], 
                updates=updates,
                on_unused_input='ignore')
        self.validate_fn = theano.function(self.inps, 
                [loss_fn_val, eval_fn_val, predicted_probs_val]+attentions,
                on_unused_input='ignore')
Project: KB-InfoBot    Author: MiuLab    | project source | file source
def _init_model(self, in_size, out_size, n_hid=10, learning_rate_sl=0.005, \
            learning_rate_rl=0.005, batch_size=32, ment=0.1):
        # 2-layer MLP
        self.in_size = in_size # x and y coordinate
        self.out_size = out_size # up, down, right, left
        self.batch_size = batch_size
        self.learning_rate = learning_rate_rl
        self.n_hid = n_hid

        input_var, turn_mask, act_mask, reward_var = T.ftensor3('in'), T.imatrix('tm'), \
                T.itensor3('am'), T.fvector('r')

        in_var = T.reshape(input_var, (input_var.shape[0]*input_var.shape[1],self.in_size))

        l_mask_in = L.InputLayer(shape=(None,None), input_var=turn_mask)

        pol_in = T.fmatrix('pol-h')
        l_in = L.InputLayer(shape=(None,None,self.in_size), input_var=input_var)
        l_pol_rnn = L.GRULayer(l_in, n_hid, hid_init=pol_in, mask_input=l_mask_in) # B x H x D
        pol_out = L.get_output(l_pol_rnn)[:,-1,:]
        l_den_in = L.ReshapeLayer(l_pol_rnn, (turn_mask.shape[0]*turn_mask.shape[1], n_hid)) # BH x D
        l_out = L.DenseLayer(l_den_in, self.out_size, nonlinearity=lasagne.nonlinearities.softmax)

        self.network = l_out
        self.params = L.get_all_params(self.network)

        # rl
        probs = L.get_output(self.network) # BH x A
        out_probs = T.reshape(probs, (input_var.shape[0],input_var.shape[1],self.out_size)) # B x H x A
        log_probs = T.log(out_probs)
        act_probs = (log_probs*act_mask).sum(axis=2) # B x H
        ep_probs = (act_probs*turn_mask).sum(axis=1) # B
        H_probs = -T.sum(T.sum(out_probs*log_probs,axis=2),axis=1) # B
        self.loss = 0.-T.mean(ep_probs*reward_var + ment*H_probs)

        updates = lasagne.updates.rmsprop(self.loss, self.params, learning_rate=learning_rate_rl, \
                epsilon=1e-4)

        self.inps = [input_var, turn_mask, act_mask, reward_var, pol_in]
        self.train_fn = theano.function(self.inps, self.loss, updates=updates)
        self.obj_fn = theano.function(self.inps, self.loss)
        self.act_fn = theano.function([input_var, turn_mask, pol_in], [out_probs, pol_out])

        # sl
        sl_loss = 0.-T.mean(ep_probs)
        sl_updates = lasagne.updates.rmsprop(sl_loss, self.params, learning_rate=learning_rate_sl, \
                epsilon=1e-4)

        self.sl_train_fn = theano.function([input_var, turn_mask, act_mask, pol_in], sl_loss, \
                updates=sl_updates)
        self.sl_obj_fn = theano.function([input_var, turn_mask, act_mask, pol_in], sl_loss)