Python theano.tensor module: lvector() usage examples

The following code examples, collected from open-source Python projects, illustrate how to use theano.tensor.lvector().
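
Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what theano.tensor.lvector() declares: a symbolic one-dimensional int64 tensor, most often used as an index or label vector.

import numpy
import theano
import theano.tensor as T

idx = T.lvector('idx')    # symbolic 1-d int64 tensor
data = T.vector('data')   # symbolic 1-d floatX tensor

# Advanced indexing with an int64 vector is the most common pattern below.
f = theano.function([data, idx], data[idx])
print(f(numpy.arange(5, dtype=theano.config.floatX),
        numpy.array([0, 2, 4], dtype='int64')))   # -> [ 0.  2.  4.]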

Project: top-k-rec    Author: domainxz
def _generate_train_model_function(self, scores):
        u = T.lvector('u')
        i = T.lvector('i')
        j = T.lvector('j')
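        # u, i, j: int64 index vectors holding the users, positive items and
        # negative items of the sampled training triples (BPR-style ranking)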
        self.W = theano.shared(numpy.zeros(self._dim).astype('float32'), name='W')
        self.S = theano.shared(scores, name='S')
        x_ui  = T.dot(self.W, self.S[u, i, :].T)
        x_uj  = T.dot(self.W, self.S[u, j, :].T)
        x_uij = x_ui - x_uj
       obj = T.sum(
               T.log(T.nnet.sigmoid(x_uij)).sum() - \
               self._lambda_w * 0.5 * (self.W ** 2).sum()
               )
       cost = -obj
       g_cost_W = T.grad(cost=cost, wrt=self.W)
       updates = [
               (self.W, self.W - self._learning_rate * g_cost_W)
               ]
        self.train_model = theano.function(inputs=[u, i, j], outputs=cost, updates=updates)
Project: Theano-MPI    Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
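        # self.y: one int64 class label per image, as expected by categorical_crossentropy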
        self.lr = T.scalar('lr')

        net = build_model_resnet50(input_shape=(None, 3, 224, 224))

        if self.verbose: print('Total number of layers:', len(lasagne.layers.get_all_layers(net['prob'])))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI    Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_vgg16(input_shape=(None, 3, 224, 224), verbose=self.verbose)
        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI    Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet152(input_shape=(None, 3, 224, 224))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: learning-class-invariant-features    Author: sbelharbi
def get_inter_output(model, l_tst, testx_sh):
    i_x_vl = T.lvector("ixtst")

    eval_fn_tst = theano.function(
        [i_x_vl],
        [l.output for l in model.layers],
        givens={model.x: testx_sh[i_x_vl]})
    output_v = [
        eval_fn_tst(np.array(l_tst[kkk])) for kkk in range(len(l_tst))]
    nbr_layers = len(output_v[0])

    l_val = []
    for l in range(nbr_layers):
        tmp = None
        for k in output_v:
            if tmp is None:
                tmp = k[l]
            else:
                tmp = np.vstack((tmp, k[l]))
        l_val.append(tmp)

    return l_val
Project: Theano-Deep-learning    Author: GeekLiB
def test_op(self):
        n = tensor.lscalar()
        f = theano.function([self.p, n], multinomial(n, self.p))

        _n = 5
        tested = f(self._p, _n)
        assert tested.shape == self._p.shape
        assert numpy.allclose(numpy.floor(tested.todense()), tested.todense())
        assert tested[2, 1] == _n

        n = tensor.lvector()
        f = theano.function([self.p, n], multinomial(n, self.p))

        _n = numpy.asarray([1, 2, 3, 4], dtype='int64')
        tested = f(self._p, _n)
        assert tested.shape == self._p.shape
        assert numpy.allclose(numpy.floor(tested.todense()), tested.todense())
        assert tested[2, 1] == _n[2]
Project: Theano-Deep-learning    Author: GeekLiB
def test_err_bound_list(self):
        n = self.shared(numpy.ones((2, 3), dtype=self.dtype) * 5)
        l = lvector()
        t = n[l]
        # We test AdvancedSubtensor1 again, as we transfer data to the cpu.
        self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor1))

        f = self.function([l], t, op=self.adv_sub1)

        # the grad
        g = self.function([l],
                          inc_subtensor(t, numpy.asarray([[1.]], self.dtype)),
                          op=self.adv_incsub1)

        for shp in [[0, 4], [0, -3], [-10]]:
            self.assertRaises(IndexError, f, shp)
            self.assertRaises(IndexError, g, shp)
Project: Theano-Deep-learning    Author: GeekLiB
def test_grad(self):
        ones = numpy.ones((1, 3), dtype=self.dtype)
        n = self.shared(ones * 5, broadcastable=(True, False))
        idx = tensor.lvector()
        idx2 = tensor.lvector()
        t = n[idx, idx2]
        self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor))

        utt.verify_grad(lambda m: m[[1, 3], [2, 4]],
                        [numpy.random.rand(5, 5).astype(self.dtype)])

        def fun(x, y):
            return advanced_inc_subtensor(x, y, [1, 3], [2, 4])
        utt.verify_grad(fun, [numpy.random.rand(5, 5).astype(self.dtype),
                              numpy.random.rand(2).astype(self.dtype)])

        def fun(x, y):
            return advanced_set_subtensor(x, y, [1, 3], [2, 4])
        utt.verify_grad(fun, [numpy.random.rand(5, 5).astype(self.dtype),
                              numpy.random.rand(2).astype(self.dtype)])
Project: Theano-Deep-learning    Author: GeekLiB
def test_grad(self):
        x = tensor.matrix('x')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        xe = op(x, one_of_n)
        f = theano.function([x, one_of_n], xe)
        x_val = numpy.asarray(
            [[.4, .6, .0], [.1, .8, .1]],
            dtype=config.floatX)
        xe_val = f(x_val, [0, 1])
        assert numpy.allclose(xe_val, -numpy.log([.4, .8]))

        def oplike(x):
            return op(x, [0, 1])

        tensor.verify_grad(oplike, [x_val], rng=numpy.random)
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_optimizations(self):
        x = tensor.matrix('x')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        # xe = op(x, one_of_n)

        fgraph = gof.FunctionGraph(
            [x, one_of_n],
            [op(softmax_op(x), one_of_n)])
        assert fgraph.outputs[0].owner.op == op

        theano.compile.mode.optdb.query(
            theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

        assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
        assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
                crossentropy_softmax_argmax_1hot_with_bias)
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_optimizations_w_bias_vector(self):
        x = tensor.vector('x')
        b = tensor.vector('b')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        fgraph = gof.FunctionGraph(
            [x, b, one_of_n],
            [op(softmax_op(x + b), one_of_n)])
        assert fgraph.outputs[0].owner.op == op
        # print 'BEFORE'
        # for node in fgraph.toposort():
        #    print node.op
        # print printing.pprint(node.outputs[0])
        # print '----'

        theano.compile.mode.optdb.query(
            theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
        # print 'AFTER'
        # for node in fgraph.toposort():
        #    print node.op
        # print '===='
        assert len(fgraph.toposort()) == 3
        assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
        assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
                crossentropy_softmax_argmax_1hot_with_bias)
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_optimizations():
    from theano.tensor.nnet.nnet import softmax, crossentropy_categorical_1hot
    x = tensor.fmatrix('x')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    op(x, one_of_n)

    fgraph = theano.gof.FunctionGraph(
        [x, one_of_n],
        [op(softmax(x), one_of_n)])
    assert fgraph.outputs[0].owner.op == op

    mode_with_gpu.optimizer.optimize(fgraph)

    assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
    assert fgraph.outputs[0].owner.inputs[0].owner.op == cuda.host_from_gpu
    assert fgraph.outputs[0].owner.inputs[0].owner.inputs[0].owner.op == cuda.nnet.gpu_crossentropy_softmax_argmax_1hot_with_bias
Project: top-k-rec    Author: domainxz
def _generate_train_model_function(self):
        u = T.lvector('u')
        i = T.lvector('i')
        j = T.lvector('j')

        self.W = theano.shared(0.01 * numpy.random.randn(self._n_users, self._K).astype('float32'), name='W')
        self.H = theano.shared(0.01 * numpy.random.randn(self._n_items, self._K).astype('float32'), name='H')
        self.B = theano.shared(numpy.zeros(self._n_items).astype('float32'), name='B')
        x_ui  = T.dot(self.W[u], self.H[i].T).diagonal()
        x_uj  = T.dot(self.W[u], self.H[j].T).diagonal()
        x_uij = self.B[i] - self.B[j] + x_ui - x_uj

        obj = T.sum(
                T.log(T.nnet.sigmoid(x_uij)).sum() - \
                self._lambda_u * 0.5 * (self.W[u] ** 2).sum() - \
                self._lambda_i * 0.5 * (self.H[i] ** 2).sum() - \
                self._lambda_j * 0.5 * (self.H[j] ** 2).sum() - \
                self._lambda_bias * 0.5 * (self.B[i] ** 2 + self.B[j] ** 2).sum()
              )

        cost = -obj

        g_cost_W = T.grad(cost=cost, wrt=self.W)
        g_cost_H = T.grad(cost=cost, wrt=self.H)
        g_cost_B = T.grad(cost=cost, wrt=self.B)

        updates = [ 
                  (self.W, self.W - self._learning_rate * g_cost_W),
                  (self.H, self.H - self._learning_rate * g_cost_H),
                  (self.B, self.B - self._learning_rate * g_cost_B)
                  ]
        self.train_model = theano.function(inputs=[u, i, j], outputs=cost, updates=updates)
Project: complex    Author: ttrouill
def __init__(self):
        self.name = self.__class__.__name__

        #Symbolic expressions for the prediction function (and compiled one too), the loss, the regularization, and the loss to
        #optimize (loss + lmbda * regul)
        #To be defined by the child classes:
        self.pred_func = None
        self.pred_func_compiled = None

        self.loss_func = None
        self.regul_func = None
        self.loss_to_opt = None

        #Symbolic variables for training values
        self.ys = TT.vector('ys')
        self.rows = TT.lvector('rows')
        self.cols = TT.lvector('cols')
        self.tubes = TT.lvector('tubes') 


        #Current values for which the loss is currently compiled
        #3 dimensions:
        self.n = 0 #Number of subject entities
        self.m = 0 #Number of relations
        self.l = 0 #Number of object entities
        #and rank:
        self.k = 0
        #and corresponding number of parameters (i.e. n*k + m*k + l*k for CP_Model)
        self.nb_params = 0
Project: HFT-Prediction    Author: Hunter-Lin
def build_model(n_in, n_classes, n_hidden, lr, L1_reg, L2_reg):
    # add comments...

    # Q: why do x and y use different types?
    # Because of their inherent properties: x holds real-valued features, y holds integer class labels.
    x = T.matrix('x')
    y = T.lvector('y')

    W_h, b_h = _init_hidden_weights(n_in, n_hidden)
    W, b = _init_logreg_weights(n_hidden, n_classes)

    p_y_given_x = feed_forward(T.nnet.softmax, 
                    W, b,
                    feed_forward(
                        T.tanh, W_h, b_h, x
                  ))


    # Note: p_y_given_x[:, y] selects whole columns rather than one entry per row;
    # the usual negative log-likelihood would index with p_y_given_x[T.arange(y.shape[0]), y].
    cost2 = -T.sum(T.log(p_y_given_x[:, y])) + L1(L1_reg, W, b) + L2(L2_reg, W, b)
    cost = -T.sum(p_y_given_x[:, y]) + L1(L1_reg, W, b) + L2(L2_reg, W, b)
    # Despite its name, `acc` computes the misclassification rate (fraction of wrong predictions).
    acc = T.sum(T.neq(y, T.argmax(p_y_given_x, axis=1))) / (y.shape[0] * 1.)
    # debug = theano.printing.Print("debug")
    train_model = theano.function(
        inputs = [x, y],
        # outputs = [cost2, debug(acc)],
        outputs = cost2,
        updates = [
            (W, sgd(W, cost, lr)),
            (b, sgd(b, cost, lr)),
            (W_h, sgd(W_h, cost, lr)),
            (b_h, sgd(b_h, cost, lr)),
            ]
        )


    evaluate_model = theano.function(
        inputs = [x, y],
        outputs = acc
        )

    return train_model, evaluate_model
Project: cbt-model    Author: strin
def arch_memnet_lexical(self):
        '''
        each memory slot is a lexical.
        '''
        contexts = T.ltensor3('contexts')
        querys = T.lmatrix('querys')
        yvs = T.lvector('yvs')
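        # yvs: int64 target indices (one answer label per batch example)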
        hop = 1

        params = []
        question_layer = Embed(self.vocab_size, self.hidden_dim)
        q = T.reshape(question_layer(querys.flatten()),
                      (self.batchsize, self.sen_maxlen, self.hidden_dim)
                      )
        if self.kwargs.get('position_encoding'):
            lmat = position_encoding(self.sen_maxlen, self.hidden_dim).dimshuffle('x', 0, 1)
            print '[memory network] use PE'
            q = q * lmat
        u = mean(q, axis=1)
        params.extend(question_layer.params)

        mem_layers = []
        for hi in range(hop):
            mem_layer = MemoryLayer(self.batchsize, self.mem_size, self.unit_size, self.vocab_size, self.hidden_dim,
                                    **self.kwargs)
            params.extend(mem_layer.params)
            mem_layers.append(mem_layer)
            o = mem_layer(contexts, u)
            u = u + o

        linear = LinearLayer(self.hidden_dim, self.vocab_size)
        params.extend(linear.params)
        probs = softmax(linear(u))
        inputs = {
            'contexts': contexts,
            'querys': querys,
            'yvs': yvs,
            'cvs': T.lmatrix('cvs')
        }
        return (probs, inputs, params)
Project: cbt-model    Author: strin
def arch_lstmq(self, param_b=2):

        contexts = T.ltensor3('contexts')
        querys = T.lmatrix('querys')
        yvs = T.lvector('yvs')

        params = []
        question_layer = Embed(self.vocab_size, self.hidden_dim)
        params.extend(question_layer.params)
        q = T.reshape(question_layer(querys.flatten()),
                      (self.batchsize, self.sen_maxlen, self.hidden_dim)
                      )
        lmat = position_encoding(self.sen_maxlen, self.hidden_dim).dimshuffle('x', 0, 1)
        q = q * lmat
        u = mean(q, axis=1)


        embed_layer = Embed(self.vocab_size, self.hidden_dim)
        params.extend(embed_layer.params)
        lmat = position_encoding(self.unit_size, self.hidden_dim).dimshuffle('x', 'x', 0, 1)
        m = T.reshape(embed_layer(contexts.flatten()), (self.batchsize, self.mem_size, self.unit_size, self.hidden_dim))
        m = mean(m * lmat, axis=2)

        lstm = LSTMq(self.batchsize, self.hidden_dim)
        params.extend(lstm.params)
        o = lstm(m.dimshuffle(1, 0, 2), u)

        linear = LinearLayer(self.hidden_dim, self.vocab_size)
        params.extend(linear.params)
        probs = softmax(linear(o))

        inputs = {
            'contexts': contexts,
            'querys': querys,
            'yvs': yvs,
            'cvs': T.lmatrix('cvs')
        }
        return (probs, inputs, params)
Project: Theano-Deep-learning    Author: GeekLiB
def test_sparse_from_list(self):
        x = tensor.matrix('x')
        vals = tensor.matrix('vals')
        ilist = tensor.lvector('ilist')

        out = construct_sparse_from_list(x, vals, ilist)
        self._compile_and_check(
                [x, vals, ilist],
                [out],
                [numpy.zeros((40, 10), dtype=config.floatX),
                 numpy.random.randn(12, 10).astype(config.floatX),
                 numpy.random.randint(low=0, high=40, size=(12,))],
                ConstructSparseFromList
                )
Project: Theano-Deep-learning    Author: GeekLiB
def test_symbolic_shape(self):
        rng_R = random_state_type()
        shape = tensor.lvector()
        post_r, out = uniform(rng_R, shape, ndim=2)
        f = compile.function([rng_R, shape], out)
        rng_state0 = numpy.random.RandomState(utt.fetch_seed())

        assert f(rng_state0, [2, 3]).shape == (2, 3)
        assert f(rng_state0, [4, 8]).shape == (4, 8)

        self.assertRaises(ValueError, f, rng_state0, [4])
        self.assertRaises(ValueError, f, rng_state0, [4, 3, 4, 5])
Project: Theano-Deep-learning    Author: GeekLiB
def test_binomial_vector(self):
        rng_R = random_state_type()
        n = tensor.lvector()
        prob = tensor.vector()
        post_r, out = binomial(rng_R, n=n, p=prob)
        assert out.ndim == 1
        f = compile.function([rng_R, n, prob], [post_r, out],
                             accept_inplace=True)

        n_val = [1, 2, 3]
        prob_val = numpy.asarray([.1, .2, .3], dtype=config.floatX)
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())

        # Arguments of size (3,)
        rng0, val0 = f(rng, n_val, prob_val)
        numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])
        numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = compile.function([rng_R, n, prob],
                binomial(rng_R, n=n, p=prob, size=(3,)),
                accept_inplace=True)
        rng2, val2 = g(rng1, n_val, prob_val)
        numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, rng2, n_val[:-1], prob_val[:-1])
Project: Theano-Deep-learning    Author: GeekLiB
def test_multinomial_vector(self):
        rng_R = random_state_type()
        n = tensor.lvector()
        pvals = tensor.matrix()
        post_r, out = multinomial(rng_R, n=n, pvals=pvals)
        assert out.ndim == 2
        f = compile.function([rng_R, n, pvals], [post_r, out],
                             accept_inplace=True)

        n_val = [1, 2, 3]
        pvals_val = [[.1, .9], [.2, .8], [.3, .7]]
        pvals_val = numpy.asarray(pvals_val, dtype=config.floatX)
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())

        # Arguments of size (3,)
        rng0, val0 = f(rng, n_val, pvals_val)
        numpy_val0 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val, pvals_val)])
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        rng1, val1 = f(rng0, n_val[:-1], pvals_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val[:-1], pvals_val[:-1])])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = compile.function([rng_R, n, pvals],
                multinomial(rng_R, n=n, pvals=pvals, size=(3,)),
                accept_inplace=True)
        rng2, val2 = g(rng1, n_val, pvals_val)
        numpy_val2 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val, pvals_val)])
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, rng2, n_val[:-1], pvals_val[:-1])
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_upcast_elemwise_constant_inputs():
    s = dvector("s")
    x = tensor.sum(tensor.log(10 ** s))
    f = function([s], [tensor.grad(x, s)])
    f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12])

    # This test a corner where the optimization should not be applied.
    old = theano.config.floatX
    theano.config.floatX = 'float32'
    try:
        v = lvector()
        function([v], theano.tensor.basic.true_div(v, 2))
    finally:
        theano.config.floatX = old
Project: Theano-Deep-learning    Author: GeekLiB
def test_adv_constant_arg(self):
        # Test case provided (and bug detected, gh-607) by John Salvatier
        m = matrix('m')
        gv = numpy.array([0, 1, 3])
        g = theano.tensor.constant(gv)
        i = theano.tensor.lvector('i')

        # s1 used to fail
        s1 = m[gv, i]
        s2 = m[g, i]

        assert gof.graph.is_same_graph(s1, s2)
Project: Theano-Deep-learning    Author: GeekLiB
def test_adv1_inc_sub_notlastdim_1_2dval_broadcast(self):
        # Test that taking 1-dimensional advanced indexing
        # over a dimension that's not the first (outer-most),
        # and incrementing/setting with broadcast
        m = matrix('m')

        # Test for both vector and matrix as index
        sym_i = (lvector('i'), lmatrix('i'))
        shape_i = ((4,), (4, 2))
        shape_val = ((3, 1), (3, 1, 1))

        # Disable the warning emitted for that case
        orig_warn = config.warn.inc_set_subtensor1
        try:
            config.warn.inc_set_subtensor1 = False

            for i, shp_i, shp_v in zip(sym_i, shape_i, shape_val):
                sub_m = m[:, i]
                m1 = set_subtensor(sub_m, numpy.zeros(shp_v))
                m2 = inc_subtensor(sub_m, numpy.ones(shp_v))
                f = theano.function([m, i], [m1, m2])

                m_val = rand(3, 5)
                i_val = randint_ranged(min=0, max=4, shape=shp_i)
                m1_ref = m_val.copy()
                m2_ref = m_val.copy()

                m1_val, m2_val = f(m_val, i_val)
                for idx in i_val.ravel():
                    m1_ref[:, idx] = 0
                    m2_ref[:, idx] += 1

                assert numpy.allclose(m1_val, m1_ref), (m1_val, m1_ref)
                assert numpy.allclose(m2_val, m2_ref), (m2_val, m2_ref)
        finally:
            config.warn.inc_set_subtensor1 = orig_warn
Project: Theano-Deep-learning    Author: GeekLiB
def test_adv1_inc_sub_notlastdim_1_2dval_no_broadcast(self):
        # Test that taking 1-dimensional advanced indexing
        # over a dimension that's not the first (outer-most),
        # and incrementing/setting without broadcast
        m = matrix('m')

        # Test for both vector and matrix as index
        sym_i = (lvector('i'), lmatrix('i'))
        shape_i = ((4,), (4, 2))
        shape_val = ((3, 4), (3, 4, 2))

        # Disable the warning emitted for that case
        orig_warn = config.warn.inc_set_subtensor1

        try:
            config.warn.inc_set_subtensor1 = False
            for i, shp_i, shp_v in zip(sym_i, shape_i, shape_val):
                sub_m = m[:, i]
                m1 = set_subtensor(sub_m, numpy.zeros(shp_v))
                m2 = inc_subtensor(sub_m, numpy.ones(shp_v))
                f = theano.function([m, i], [m1, m2])

                m_val = rand(3, 5)
                i_val = randint_ranged(min=0, max=4, shape=shp_i)
                m1_ref = m_val.copy()
                m2_ref = m_val.copy()

                m1_val, m2_val = f(m_val, i_val)
                # We have to explicitly loop over all individual indices,
                # not as a list or array, numpy only increments the indexed
                # elements once even if the indices are repeated.
                for idx in i_val.ravel():
                    m1_ref[:, idx] = 0
                    m2_ref[:, idx] += 1

                assert numpy.allclose(m1_val, m1_ref), (m1_val, m1_ref)
                assert numpy.allclose(m2_val, m2_ref), (m2_val, m2_ref)
        finally:
            config.warn.inc_set_subtensor1 = orig_warn
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.rng = numpy.random.RandomState(seed=utt.fetch_seed())

        self.s = tensor.iscalar()
        self.v = tensor.fvector()
        self.m = tensor.dmatrix()
        self.t = tensor.ctensor3()

        self.adv1q = tensor.lvector()  # advanced 1d query
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.s = iscalar()
        self.v = fvector()
        self.m = dmatrix()
        self.t = ctensor3()
        self.ft4 = ftensor4()

        self.ix1 = lvector()  # advanced 1d query
        self.ix12 = lvector()
        self.ix2 = lmatrix()
        self.ixr = lrow()
Project: Theano-Deep-learning    Author: GeekLiB
def test_bug_2009_06_02_trac_387():
    y = tensor.lvector('y')
    f = theano.function([y],
            tensor.int_div(
                tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]), 2))
    print(f(numpy.ones(1, dtype='int64') * 3))
    # XXX: there is no assert, nor comment that DEBUGMODE is to do the
    #      checking. What was the bug, and how is it being tested?
Project: Theano-Deep-learning    Author: GeekLiB
def test_no_reuse():
    x = T.lvector()
    y = T.lvector()
    f = theano.function([x, y], x + y)

    # provide both inputs in the first call
    f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64'))

    try:
        f(numpy.ones(10))
    except TypeError:
        return
    assert not 'should not get here'
Project: Theano-Deep-learning    Author: GeekLiB
def test_symbolic_shape(self):
        random = RandomStreams(utt.fetch_seed())
        shape = tensor.lvector()
        f = function([shape], random.uniform(size=shape, ndim=2))

        assert f([2, 3]).shape == (2, 3)
        assert f([4, 8]).shape == (4, 8)
        self.assertRaises(ValueError, f, [4])
        self.assertRaises(ValueError, f, [4, 3, 4, 5])
Project: Theano-Deep-learning    Author: GeekLiB
def test_binomial_vector(self):
        random = RandomStreams(utt.fetch_seed())
        n = tensor.lvector()
        prob = tensor.vector()
        out = random.binomial(n=n, p=prob)
        assert out.ndim == 1
        f = function([n, prob], out)

        n_val = [1, 2, 3]
        prob_val = numpy.asarray([.1, .2, .3], dtype=config.floatX)
        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(n_val, prob_val)
        numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        val1 = f(n_val[:-1], prob_val[:-1])
        numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function([n, prob], random.binomial(n=n, p=prob, size=(3,)))
        val2 = g(n_val, prob_val)
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, n_val[:-1], prob_val[:-1])
Project: Theano-Deep-learning    Author: GeekLiB
def test_multinomial_vector(self):
        random = RandomStreams(utt.fetch_seed())
        n = tensor.lvector()
        pvals = tensor.matrix()
        out = random.multinomial(n=n, pvals=pvals)
        assert out.ndim == 2
        f = function([n, pvals], out)

        n_val = [1, 2, 3]
        pvals_val = [[.1, .9], [.2, .8], [.3, .7]]
        pvals_val = numpy.asarray(pvals_val, dtype=config.floatX)
        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(n_val, pvals_val)
        numpy_val0 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val, pvals_val)])
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        val1 = f(n_val[:-1], pvals_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val[:-1], pvals_val[:-1])])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function([n, pvals], random.multinomial(n=n, pvals=pvals, size=(3,)))
        val2 = g(n_val, pvals_val)
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
            for nv, pv in zip(n_val, pvals_val)])
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, n_val[:-1], pvals_val[:-1])
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = numpy.random.RandomState(utt.fetch_seed())
        admat_val = rng.rand(10, 5).astype(config.floatX)
        admat_val /= admat_val.sum(axis=1).reshape(10, 1)
        advec_val = rng.rand(10).astype(config.floatX)
        alvec_val = rng.randint(low=0, high=5, size=10)
        self._compile_and_check(
            [advec, admat, alvec],
            [CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)],
            [advec_val, admat_val, alvec_val],
            CrossentropySoftmax1HotWithBiasDx)
Project: Theano-Deep-learning    Author: GeekLiB
def test_neg_idx(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = numpy.random.RandomState(utt.fetch_seed())
        admat_val = rng.rand(10, 5).astype(config.floatX)
        admat_val /= admat_val.sum(axis=1).reshape(10, 1)
        advec_val = rng.rand(10).astype(config.floatX)
        alvec_val = rng.randint(low=0, high=5, size=10)
        alvec_val[1] = -1
        out = CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)
        f = theano.function([advec, admat, alvec], out)
        self.assertRaises(ValueError, f, advec_val, admat_val, alvec_val)
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = numpy.random.RandomState(utt.fetch_seed())
        admat_val = rng.rand(3, 5).astype(config.floatX)
        advec_val = rng.rand(5).astype(config.floatX)
        alvec_val = rng.randint(low=0, high=5, size=3)
        self._compile_and_check(
            [admat, advec, alvec],
            CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec),
            [admat_val, advec_val, alvec_val],
            CrossentropySoftmaxArgmax1HotWithBias)
Project: Theano-Deep-learning    Author: GeekLiB
def test_neg_idx(self):
        admat = matrix()
        advec = vector()
        alvec = lvector()
        rng = numpy.random.RandomState(utt.fetch_seed())
        admat_val = rng.rand(3, 5).astype(config.floatX)
        advec_val = rng.rand(5).astype(config.floatX)
        alvec_val = rng.randint(low=0, high=5, size=3)
        alvec_val[1] = -1
        out = CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec)
        f = theano.function([admat, advec, alvec], out)
        self.assertRaises(ValueError, f, admat_val, advec_val, alvec_val)
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):
        admat = matrix()
        alvec = lvector()
        rng = numpy.random.RandomState(utt.fetch_seed())
        admat_val = rng.rand(3, 2).astype(config.floatX)
        alvec_val = [0, 1, 0]
        self._compile_and_check(
            [admat, alvec],
            [CrossentropyCategorical1Hot()(admat, alvec)],
            [admat_val, alvec_val],
            CrossentropyCategorical1Hot)
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_optimizations_vector(self):
        x = tensor.vector('x')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        fgraph = gof.FunctionGraph(
            [x, one_of_n],
            [op(softmax_op(x), one_of_n)])
        assert fgraph.outputs[0].owner.op == op

        theano.compile.mode.optdb.query(
            theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
        assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
        assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
                crossentropy_softmax_argmax_1hot_with_bias)
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_optimizations_w_bias(self):
        x = tensor.matrix('x')
        b = tensor.vector('b')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        # xe = op(x, one_of_n)

        fgraph = gof.FunctionGraph(
            [x, b, one_of_n],
            [op(softmax_op(x + b), one_of_n)])
        assert fgraph.outputs[0].owner.op == op

        # print 'BEFORE'
        # for node in fgraph.toposort():
        #    print node.op
        # print printing.pprint(node.outputs[0])
        # print '----'

        theano.compile.mode.optdb.query(
            theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

        # print 'AFTER'
        # for node in fgraph.toposort():
        #    print node.op
        # print printing.pprint(node.outputs[0])
        # print '===='
        assert len(fgraph.toposort()) == 2

        assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
        assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
                crossentropy_softmax_argmax_1hot_with_bias)
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_grad_optimizations_vector(self):
        x = tensor.vector('x')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        xe = op(softmax_op(x), one_of_n)
        sum_xe = tensor.sum(xe)
        g_x = tensor.grad(sum_xe, x)
        fgraph = gof.FunctionGraph(
            [x, one_of_n],
            [g_x])

        # print 'BEFORE'
        # for node in fgraph.toposort():
        #    print node.op, node.inputs
        # print '----'
        theano.compile.mode.optdb.query(
            theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

        # print 'AFTER'
        # for node in fgraph.toposort():
        #    print node.op, node.inputs

        has_cx1hot = False
        has_cx1hotdx = False
        has_softmax = False
        has_softmaxdx = False
        for node in fgraph.toposort():
            if node.op == crossentropy_softmax_argmax_1hot_with_bias:
                has_cx1hot = True
            if node.op == crossentropy_softmax_1hot_with_bias_dx:
                has_cx1hotdx = True
            if node.op == softmax_op:
                has_softmax = True
            if node.op == softmax_grad:
                has_softmaxdx = True
        assert not has_cx1hot
        assert has_cx1hotdx
        assert has_softmax
        assert not has_softmaxdx
Project: Theano-Deep-learning    Author: GeekLiB
def test_grad_types(self):
        # This function simply tests the behaviour of the AbstractConv
        # Ops, not their optimizations
        input = self.input
        filters = self.filters
        topgrad = self.topgrad

        out_shape = tensor.lvector()

        output = conv.conv2d(input, filters)
        grad_input, grad_filters = theano.grad(output.sum(),
                                               wrt=(input, filters))
        assert grad_input.type == input.type, (
            grad_input, grad_input.type, input, input.type)
        assert grad_filters.type == filters.type, (
            grad_filters, grad_filters.type, filters, filters.type)

        grad_filters = conv.AbstractConv2d_gradWeights()(
            input, topgrad, out_shape)
        grad_input, grad_topgrad = theano.grad(grad_filters.sum(),
                                               wrt=(input, topgrad))

        assert grad_input.type == input.type, (
            grad_input, grad_input.type, input, input.type)
        assert grad_topgrad.type == topgrad.type, (
            grad_topgrad, grad_topgrad.type, topgrad, topgrad.type)

        grad_input = conv.AbstractConv2d_gradInputs()(
            filters, topgrad, out_shape)
        grad_filters, grad_topgrad = theano.grad(grad_input.sum(),
                                                 wrt=(filters, topgrad))

        assert grad_filters.type == filters.type, (
            grad_filters, grad_filters.type, filters, filters.type)
        assert grad_topgrad.type == topgrad.type, (
            grad_topgrad, grad_topgrad.type, topgrad, topgrad.type)
Project: Theano-Deep-learning    Author: GeekLiB
def make_node(self, x):
        return Apply(self, [x], [tensor.lvector()])
Project: top-k-rec    Author: domainxz
def _generate_train_model_function(self, content):
        u = T.lvector('u')
        i = T.lvector('i')
        j = T.lvector('j')

        self.W = theano.shared(0.01 * numpy.random.randn(self._n_users, self._K).astype('float32'), name='W')
        self.H = theano.shared(0.01 * numpy.random.randn(self._n_items, self._K).astype('float32'), name='H')
        self.P = theano.shared(0.01 * numpy.random.randn(self._n_users, self._K).astype('float32'), name='P')
        self.E = theano.shared(0.01 * numpy.random.randn(self._K, content.shape[1]).astype('float32'), name='E')
        self.F = theano.shared(content, name='F')
        self.B = theano.shared(numpy.zeros(self._n_items).astype('float32'), name='B')
        self.C = theano.shared(numpy.zeros(content.shape[1]).astype('float32'), name='C')

        dF_ij = self.F[i] - self.F[j]
        x_ui  = T.dot(self.W[u], self.H[i].T).diagonal()
        x_uj  = T.dot(self.W[u], self.H[j].T).diagonal()
        x_uij = self.B[i] - self.B[j] + \
                x_ui - x_uj + \
                T.dot(self.P[u], T.dot(self.E, dF_ij.T)).diagonal() + \
                T.dot(self.C, dF_ij.T)

        obj = T.sum(T.log(T.nnet.sigmoid(x_uij)).sum() - \
              self._lambda_u * 0.5 * (self.W[u] ** 2).sum() - \
              self._lambda_i * 0.5 * (self.H[i] ** 2).sum() - \
              self._lambda_j * 0.5 * (self.H[j] ** 2).sum() - \
              self._lambda_bias * 0.5 * (self.B[i] ** 2 + self.B[j] ** 2).sum()- \
              self._lambda_bias * 0.5 * (self.C ** 2).sum() - \
              self._lambda_e * 0.5 * (self.E ** 2).sum() - \
              self._lambda_u * 0.5 * (self.P[u] ** 2).sum()
              )

        cost = -obj

        g_cost_W = T.grad(cost=cost, wrt=self.W)
        g_cost_H = T.grad(cost=cost, wrt=self.H)
        g_cost_B = T.grad(cost=cost, wrt=self.B)
        g_cost_P = T.grad(cost=cost, wrt=self.P)
        g_cost_E = T.grad(cost=cost, wrt=self.E)
        g_cost_C = T.grad(cost=cost, wrt=self.C)

        updates = [ 
                    (self.W, self.W - self._learning_rate * g_cost_W),
                    (self.H, self.H - self._learning_rate * g_cost_H),
                    (self.B, self.B - self._learning_rate * g_cost_B),
                    (self.P, self.P - self._learning_rate * g_cost_P),
                    (self.E, self.E - self._learning_rate * g_cost_E),
                    (self.C, self.C - self._learning_rate * g_cost_C)
                    ]

        self.train_model = theano.function(inputs=[u, i, j], outputs=cost, updates=updates)
Project: theano-BPR    Author: hexiangnan
def __init__(self, train, test, num_user, num_item, 
                 factors, learning_rate, reg, init_mean, init_stdev):
        '''
        Constructor
        '''
        self.train = train
        self.test = test
        self.num_user = num_user
        self.num_item = num_item
        self.factors = factors
        self.learning_rate = learning_rate
        self.reg = reg

        # user & item latent vectors
        U_init = np.random.normal(loc=init_mean, scale=init_stdev, size=(num_user, factors))
        V_init = np.random.normal(loc=init_mean, scale=init_stdev, size=(num_item, factors))
        self.U = theano.shared(value = U_init.astype(theano.config.floatX), 
                               name = 'U', borrow = True)
        self.V = theano.shared(value = V_init.astype(theano.config.floatX),
                               name = 'V', borrow = True)

        # Each element is the set of items for a user, used for negative sampling
        self.items_of_user = []
        self.num_rating = 0     # number of ratings
        for u in xrange(len(train)):
            self.items_of_user.append(Set([]))
            for i in xrange(len(train[u])):
                item = train[u][i][0]
                self.items_of_user[u].add(item)
                self.num_rating += 1

        # batch variables for computing gradients
        u = T.lvector('u')
        i = T.lvector('i')
        j = T.lvector('j')
        lr = T.scalar('lr')

        # loss of the sample
        y_ui = T.dot(self.U[u], self.V[i].T).diagonal()   #1-d vector of diagonal values
        y_uj = T.dot(self.U[u], self.V[j].T).diagonal()
        regularizer = self.reg * ((self.U[u] ** 2).sum() +
                                  (self.V[i] ** 2).sum() +
                                  (self.V[j] ** 2).sum())
        loss = regularizer - T.sum(T.log(T.nnet.sigmoid(y_ui - y_uj)))
        # SGD step
        self.sgd_step = theano.function([u, i, j, lr], [],
                                        updates = [(self.U, self.U - lr * T.grad(loss, self.U)),
                                                   (self.V, self.V - lr * T.grad(loss, self.V))])
Project: Theano-Deep-learning    Author: GeekLiB
def test_vector_arguments(self):
        rng_R = random_state_type()
        low = tensor.vector()
        post_r, out = uniform(rng_R, low=low, high=1)
        assert out.ndim == 1
        f = compile.function([rng_R, low], [post_r, out], accept_inplace=True)

        def as_floatX(thing):
            return numpy.asarray(thing, dtype=theano.config.floatX)

        rng_state0 = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        post0, val0 = f(rng_state0, [-5, .5, 0, 1])
        post1, val1 = f(post0, as_floatX([.9]))
        numpy_val0 = as_floatX(numpy_rng.uniform(low=[-5, .5, 0, 1], high=1))
        numpy_val1 = as_floatX(numpy_rng.uniform(low=as_floatX([.9]), high=1))

        assert numpy.all(val0 == numpy_val0)
        assert numpy.all(val1 == numpy_val1)

        high = tensor.vector()
        post_rb, outb = uniform(rng_R, low=low, high=high)
        assert outb.ndim == 1
        fb = compile.function([rng_R, low, high], [post_rb, outb],
                              accept_inplace=True)

        post0b, val0b = fb(post1, [-4., -2], [-1, 0])
        post1b, val1b = fb(post0b, [-4.], [-1])
        numpy_val0b = as_floatX(numpy_rng.uniform(low=[-4., -2], high=[-1, 0]))
        numpy_val1b = as_floatX(numpy_rng.uniform(low=[-4.], high=[-1]))
        assert numpy.all(val0b == numpy_val0b)
        assert numpy.all(val1b == numpy_val1b)
        self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1, 0, 1])
        # TODO: do we want that?
        #self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1])

        size = tensor.lvector()
        post_rc, outc = uniform(rng_R, low=low, high=high, size=size, ndim=1)
        fc = compile.function([rng_R, low, high, size], [post_rc, outc],
                              accept_inplace=True)
        post0c, val0c = fc(post1b, [-4., -2], [-1, 0], [2])
        post1c, val1c = fc(post0c, [-4.], [-1], [1])
        numpy_val0c = as_floatX(numpy_rng.uniform(low=[-4., -2], high=[-1, 0]))
        numpy_val1c = as_floatX(numpy_rng.uniform(low=[-4.], high=[-1]))
        assert numpy.all(val0c == numpy_val0c)
        assert numpy.all(val1c == numpy_val1c)
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1])
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1, 2])
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [2, 1])
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [1])
        # TODO: do we want that?
        #self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [2])
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_fill_useless():
    # Test opt local_fill_cut
    x = dvector()
    y = dvector()
    z = lvector()
    m = dmatrix()

    x_ = numpy.random.rand(5,)
    y_ = numpy.random.rand(5,)
    z_ = (numpy.random.rand(5,) * 5).astype("int64")
    m_ = numpy.random.rand(5, 5)

    # basic case
    f = function([x], T.fill(x, x) * 2, mode=mode_opt)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]
    f(x_)

    # basic case
    f = function([x, y], T.second(y, x) * 2, mode=mode_opt)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]
    f(x_, y_)

    # basic case
    f = function([x, y], T.fill(x, y) * 2, mode=mode_opt)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]
    f(x_, y_)

    # now with different type(cast)
    f = function([x, z], T.fill(z, x) * 2, mode=mode_opt)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]
    f(x_, z_)

    # now with different type(cast)
    f = function([x, z], T.fill(x, z) * 2, mode=mode_opt)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]
    f(x_, z_)

    # now cutting out the input ??
    f = function([x, y], T.fill(x, y) * 2, mode=mode_opt)
    assert [node.op for node in f.maker.fgraph.toposort()] == [T.mul]
    f(x_, y_)

    # Test with different number of dimensions
    # The fill is not useless, so it should stay
    f = function([m, x], T.fill(m, x) * 2, mode=mode_opt)
    ops = [node.op.__class__ for node in f.maker.fgraph.toposort()]
    assert T.Alloc in ops
    f(m_, x_)
Project: Theano-Deep-learning    Author: GeekLiB
def test_vector_arguments(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dvector()
        out = random.uniform(low=low, high=1)
        assert out.ndim == 1
        f = function([low], out)

        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        val0 = f([-5, .5, 0, 1])
        val1 = f([.9])
        numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=1)
        numpy_val1 = numpy_rng.uniform(low=[.9], high=1)
        assert numpy.all(val0 == numpy_val0)
        assert numpy.all(val1 == numpy_val1)

        high = tensor.vector()
        outb = random.uniform(low=low, high=high)
        assert outb.ndim == 1
        fb = function([low, high], outb)

        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        val0b = fb([-4., -2], [-1, 0])
        val1b = fb([-4.], [-1])
        numpy_val0b = numpy_rng.uniform(low=[-4., -2], high=[-1, 0])
        numpy_val1b = numpy_rng.uniform(low=[-4.], high=[-1])
        assert numpy.all(val0b == numpy_val0b)
        assert numpy.all(val1b == numpy_val1b)
        self.assertRaises(ValueError, fb, [-4., -2], [-1, 0, 1])
        # TODO: do we want that?
        #self.assertRaises(ValueError, fb, [-4., -2], [-1])

        size = tensor.lvector()
        outc = random.uniform(low=low, high=high, size=size, ndim=1)
        fc = function([low, high, size], outc)

        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        val0c = fc([-4., -2], [-1, 0], [2])
        val1c = fc([-4.], [-1], [1])
        numpy_val0c = numpy_rng.uniform(low=[-4., -2], high=[-1, 0])
        numpy_val1c = numpy_rng.uniform(low=[-4.], high=[-1])
        assert numpy.all(val0c == numpy_val0c)
        assert numpy.all(val1c == numpy_val1c)
        self.assertRaises(ValueError, fc, [-4., -2], [-1, 0], [1])
        self.assertRaises(ValueError, fc, [-4., -2], [-1, 0], [1, 2])
        self.assertRaises(ValueError, fc, [-4., -2], [-1, 0], [2, 1])
        self.assertRaises(ValueError, fc, [-4., -2], [-1], [1])
        # TODO: do we want that?
        #self.assertRaises(ValueError, fc, [-4., -2], [-1], [2])
Project: Theano-Deep-learning    Author: GeekLiB
def test_softmax_grad_optimizations(self):
        x = tensor.matrix('x')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        xe = op(softmax_op(x), one_of_n)
        sum_xe = tensor.sum(xe)
        g_x = tensor.grad(sum_xe, x)
        fgraph = gof.FunctionGraph(
            [x, one_of_n],
            [g_x])
        assert check_stack_trace(
            fgraph, ops_to_check=[crossentropy_softmax_1hot_with_bias_dx,
                                  softmax_op])

        # print 'BEFORE'
        # for node in fgraph.toposort():
        #    print node.op, node.inputs
        # print '----'
        theano.compile.mode.optdb.query(
            theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

        # print 'AFTER'
        # for node in fgraph.toposort():
        #    print node.op, node.inputs

        has_cx1hot = False
        has_cx1hotdx = False
        has_softmax = False
        has_softmaxdx = False
        for node in fgraph.toposort():
            if node.op == crossentropy_softmax_argmax_1hot_with_bias:
                has_cx1hot = True
            if node.op == crossentropy_softmax_1hot_with_bias_dx:
                has_cx1hotdx = True
            if node.op == softmax_op:
                has_softmax = True
            if node.op == softmax_grad:
                has_softmaxdx = True
        assert not has_cx1hot
        assert has_cx1hotdx
        assert has_softmax
        assert not has_softmaxdx
Project: Theano-Deep-learning    Author: GeekLiB
def test_xent_thing_int32(self):
        verbose = 0
        mode = theano.compile.mode.get_default_mode()
        if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
            mode = 'FAST_RUN'
        rng = numpy.random.RandomState(utt.fetch_seed())
        x_val = rng.randn(3, 5).astype(config.floatX)
        y_val = numpy.asarray([2, 4, 1], dtype='int64')
        x = T.matrix('x')
        y = T.lvector('y')
        yi = T.cast(y, 'int32')
        expressions = [
            T.sum(-T.log(softmax(x)[T.arange(yi.shape[0]), yi])),
            -T.sum(T.log(softmax(x)[T.arange(yi.shape[0]), yi])),
            -T.sum(T.log(softmax(x))[T.arange(yi.shape[0]), yi]),
            T.sum(-T.log(softmax(x))[T.arange(yi.shape[0]), yi])]

        for expr in expressions:
            # Verify the optimizer worked on the expressions
            f = theano.function([x, y], expr, mode=mode)
            if verbose:
                theano.printing.debugprint(f)
            try:
                ops = [node.op for node in f.maker.fgraph.toposort()]
                assert len(ops) == 5
                assert crossentropy_softmax_argmax_1hot_with_bias in ops
                assert not [1 for o in ops
                            if isinstance(o, T.AdvancedSubtensor)]
                f(x_val, y_val)
            except Exception:
                theano.printing.debugprint(f)
                raise

            # Also verify the gradient wrt x
            g = theano.function([x, y], T.grad(expr, x), mode=mode)
            if verbose:
                theano.printing.debugprint(g)
            try:
                ops = [node.op for node in g.maker.fgraph.toposort()]
                assert len(ops) == 3
                assert crossentropy_softmax_1hot_with_bias_dx in ops
                assert softmax_op in ops
                assert softmax_grad not in ops
                g(x_val, y_val)
            except Exception:
                theano.printing.debugprint(g)
                raise