Python theano.tensor module: dvector() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.dvector().
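
For orientation, here is a minimal, self-contained sketch of the pattern that recurs throughout these examples (the names and values are illustrative): T.dvector() declares a symbolic 1-D float64 tensor, which is combined into an expression and compiled with theano.function.

import theano
import theano.tensor as T

# dvector() declares a symbolic 1-D tensor of dtype float64.
x = T.dvector('x')
y = T.dvector('y')

# Build a symbolic expression and compile it into a callable.
f = theano.function([x, y], T.dot(x, y) + (x ** 2).sum())

print(f([1., 2., 3.], [4., 5., 6.]))  # 32.0 + 14.0 = 46.0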

Project: keras-recommendation | Author: sonyisme
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Duplicate u along a new leading axis, multiply it elementwise
        # against t (axes swapped so the shapes broadcast), then restore
        # the original axis order.
        return ([u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2)
Project: keras-recommendation | Author: sonyisme
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Broadcasted elementwise product of u with each slice of t,
        # followed by a max over the last (feature) axis.
        return T.max(([u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2), 2)
Project: keras-recommendation | Author: sonyisme
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Tile u five times to match t's leading axis, multiply
        # elementwise, and sum over the last (feature) axis.
        return T.sum(([u, u, u, u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2), 2)
Project: keras-recommendation | Author: sonyisme
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Broadcasted elementwise product summed over the last (feature)
        # axis, i.e. a batched dot product of u with each slice of t.
        return T.sum(([u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2), 2)
Project: Theano-Deep-learning | Author: GeekLiB
def timeit_2vector_theano(init, nb_element=1e6, nb_repeat=3, nb_call=int(1e2), expr="a**2 + b**2 + 2*a*b"):
    # The setup code keeps the arrays defined by `init` as av/bv, then
    # rebinds a/b to symbolic dvectors and compiles `expr` into a
    # callable with theano.function.
    t3 = timeit.Timer("tf(av,bv)",
                      """
import theano
import theano.tensor as T
import numexpr as ne
from theano.tensor import exp
%(init)s
av = a
bv = b
a = T.dvector()
b = T.dvector()
tf = theano.function([a, b], %(expr)s)
""" % locals())
    ret = t3.repeat(nb_repeat, nb_call)
    return np.asarray(ret)
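
A usage sketch for the benchmark above (the init string is illustrative; it must define the NumPy arrays a and b that the setup renames to av/bv, and timeit/np are assumed to be imported in the surrounding module):

times = timeit_2vector_theano(
    init="import numpy; a = numpy.random.rand(int(1e6)); b = numpy.random.rand(int(1e6))",
    nb_repeat=3,
    nb_call=100,
)
print(times.min() / 100)  # best per-call time, in seconds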
Project: Theano-Deep-learning | Author: GeekLiB
def test_pydotprint_long_name():
    """This is a REALLY PARTIAL TEST.

    It prints a graph containing variable and apply nodes whose long
    names differ but whose shortened names do not. Such nodes must not
    be merged in the dot graph.

    """

    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    mode = theano.compile.mode.get_default_mode().excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    theano.printing.pydotprint(f, max_label_size=5,
                               print_output_file=False)
    theano.printing.pydotprint([x * 2, x + x],
                               max_label_size=5,
                               print_output_file=False)
Project: Theano-Deep-learning | Author: GeekLiB
def test_basic_usage(self):
        rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
        assert not rf.inplace
        assert getattr(rf, 'destroy_map', {}) == {}

        rng_R = random_state_type()

        # If calling RandomFunction directly, all args have to be specified,
        # because shape will have to be moved to the end
        post_r, out = rf(rng_R, (4,), 0., 1.)

        assert out.type == tensor.dvector

        f = compile.function([rng_R], out)

        rng_state0 = numpy.random.RandomState(utt.fetch_seed())

        f_0 = f(rng_state0)
        f_1 = f(rng_state0)

        assert numpy.all(f_0 == f_1)
Project: Theano-Deep-learning | Author: GeekLiB
def test_broadcast_arguments(self):
        rng_R = random_state_type()
        low = tensor.dvector()
        high = tensor.dcol()
        post_r, out = uniform(rng_R, low=low, high=high)
        assert out.ndim == 2
        f = compile.function([rng_R, low, high], [post_r, out],
                             accept_inplace=True)

        rng_state0 = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        post0, val0 = f(rng_state0, [-5, .5, 0, 1], [[1.]])
        post1, val1 = f(post0, [.9], [[1.], [1.1], [1.5]])
        post2, val2 = f(post1, [-5, .5, 0, 1], [[1.], [1.1], [1.5]])

        numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
        numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
        numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1],
                                       high=[[1.], [1.1], [1.5]])

        assert numpy.all(val0 == numpy_val0), (val0, numpy_val0)
        assert numpy.all(val1 == numpy_val1)
        assert numpy.all(val2 == numpy_val2)
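
The broadcasting exercised here mirrors plain NumPy; a quick sketch of the shape rule:

import numpy

rng = numpy.random.RandomState(0)
# A length-4 `low` broadcast against a (3, 1) column `high` yields a
# (3, 4) sample, matching the `out.ndim == 2` assertion above.
sample = rng.uniform(low=[-5, .5, 0, 1], high=[[1.], [1.1], [1.5]])
print(sample.shape)  # (3, 4)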
Project: Theano-Deep-learning | Author: GeekLiB
def test_constant_folding():
    """ Test that constant folding get registered at fast_compile

    An error removed that registration during the registration.
    """
    x = tensor.dvector()
    mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2

    # Test that we do not crash when constant folding elemwise scalar
    # as they should not generate c code.

    x = tensor.constant(3)
    assert x.ndim == 0
    mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
    f = theano.function([], [x * 2, x + x], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert all([isinstance(n.op, DeepCopyOp) for n in topo])
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_div_switch_sink(self):
        c = T.dscalar()
        idx = 0
        for condition in [(T.dmatrix('cond'), self.condm),
                          (T.dvector('cond'), self.condv),
                          (T.dscalar('cond'), self.conds)]:
            for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv),
                      (T.dscalar('x'), self.xs)]:
                y = T.true_div(
                    T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
                    T.switch(condition[0] > 0, 1. * x[0], T.log(c) * x[0]))
                f = theano.function([condition[0], x[0], c],
                                    [y], mode=self.mode)
                if type(condition[1]) is list:
                    for i in xrange(len(condition[1])):
                        res = f(condition[1][i], x[1], -1)
                        assert (res == numpy.asarray(
                            self.resm[idx][i])).sum() == self.resm[idx][i].size
                else:
                    res = f(condition[1], x[1], -1)
                    assert (res == numpy.asarray(
                        self.resm[idx])).sum() == self.resm[idx].size
                idx += 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_merge_opt_runtime():
    """In the original merge optimization, the following graph caused
    the MERGE optimizer to exhibit really bad performance (quadratic?
    exponential?).

    Ironically, there is actually no merging to do in this graph.

    """
    x = T.dvector()
    for i in xrange(50):
        if i:
            r = r + r/10
        else:
            r = x
    t = time.time()
    f = theano.function([x], r, mode='FAST_COMPILE')
    # FAST_RUN runs the in-place optimizer, which requires a lot of
    # toposorting, and toposorting is actually pretty slow at the
    # moment. This test was designed to exercise the MergeOptimizer,
    # so toposort optimizations are left for a later date.
    dt = time.time() - t

    # it should never take longer than 5 seconds to compile this graph
    assert dt < 5.0, dt
Project: Theano-Deep-learning | Author: GeekLiB
def test_broadcast_arguments(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dvector()
        high = tensor.dcol()
        out = random.uniform(low=low, high=high)
        assert out.ndim == 2
        f = function([low, high], out)

        rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
        numpy_rng = numpy.random.RandomState(int(rng_seed))
        val0 = f([-5, .5, 0, 1], [[1.]])
        val1 = f([.9], [[1.], [1.1], [1.5]])
        val2 = f([-5, .5, 0, 1], [[1.], [1.1], [1.5]])

        numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
        numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
        numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[[1.], [1.1], [1.5]])

        assert numpy.all(val0 == numpy_val0)
        assert numpy.all(val1 == numpy_val1)
        assert numpy.all(val2 == numpy_val2)
Project: Theano-Deep-learning | Author: GeekLiB
def test_stack_hessian(self):
        # Test the gradient of stack when used in hessian, see gh-1589
        a = tensor.dvector('a')
        b = tensor.dvector('b')
        A = stack([a, b])
        B = A.T.dot(A)
        Ha, Hb = hessian(B.sum(), [a, b])

        # Try some values
        a_v = numpy.random.rand(4)
        b_v = numpy.random.rand(4)
        f = theano.function([a, b], [Ha, Hb])
        Ha_v, Hb_v = f(a_v, b_v)
        # The Hessian is always a matrix full of 2
        assert Ha_v.shape == (4, 4)
        assert Hb_v.shape == (4, 4)
        assert numpy.allclose(Ha_v, 2.)
        assert numpy.allclose(Hb_v, 2.)
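
Why the Hessian is a matrix full of 2: with A = stack([a, b]), the entries of B = A.T.dot(A) are B_kl = a_k * a_l + b_k * b_l, so the cost collapses to a pair of squared sums with constant second derivatives:

\sum_{k,l} B_{kl} = \Bigl(\sum_k a_k\Bigr)^2 + \Bigl(\sum_k b_k\Bigr)^2,
\qquad
\frac{\partial^2}{\partial a_k\,\partial a_l}\Bigl(\sum_k a_k\Bigr)^2 = 2
\quad \text{for all } k, l.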
Project: Theano-Deep-learning | Author: GeekLiB
def test_stack_hessian2(self):
        # Test the hessian macro when the gradient itself does not depend
        # on the input (but the cost does)
        a = tensor.dvector('a')
        b = tensor.dvector('b')
        A = stack([a, b])
        Ha, Hb = hessian(A.sum(), [a, b])

        # Try some values
        a_v = numpy.random.rand(4)
        b_v = numpy.random.rand(4)
        f = theano.function([a, b], [Ha, Hb])
        Ha_v, Hb_v = f(a_v, b_v)
        # The Hessian is always a matrix full of 0
        assert Ha_v.shape == (4, 4)
        assert Hb_v.shape == (4, 4)
        assert numpy.allclose(Ha_v, 0.)
        assert numpy.allclose(Hb_v, 0.)
Project: Theano-Deep-learning | Author: GeekLiB
def test_infer_shape(self):
        a = tensor.dvector()
        self._compile_and_check([a], [self.op(a, 16, 0)],
                                [numpy.random.rand(12)],
                               self.op_class)
        a = tensor.dmatrix()
        for var in [self.op(a, 16, 1), self.op(a, None, 1),
                     self.op(a, 16, None), self.op(a, None, None)]:
            self._compile_and_check([a], [var],
                                    [numpy.random.rand(12, 4)],
                                    self.op_class)
        b = tensor.iscalar()
        for var in [self.op(a, 16, b), self.op(a, None, b)]:
            self._compile_and_check([a, b], [var],
                                    [numpy.random.rand(12, 4), 0],
                                    self.op_class)
Project: Theano-Deep-learning | Author: GeekLiB
def test_infer_shape(self):
        x = dmatrix('x')
        x.tag.test_value = np.zeros((2, 2))
        y = dvector('y')
        y.tag.test_value = [0, 0]

        def infer_shape(node, shapes):
            x, y = shapes
            return [y]

        @as_op([dmatrix, dvector], dvector, infer_shape)
        def cumprod_plus(x, y):
            return np.cumprod(x) + y

        self._compile_and_check([x, y], [cumprod_plus(x, y)],
                                [[[1.5, 5], [2, 2]], [1, 100, 2, 200]],
                                cumprod_plus.__class__, warn=False)
Project: Theano-Deep-learning | Author: GeekLiB
def test_param_strict(self):

        a = tensor.dvector()
        b = shared(7)
        out = a + b

        f = pfunc([In(a, strict=False)], [out])
        # works, rand generates float64 by default
        f(numpy.random.rand(8))
        # works, casting is allowed
        f(numpy.array([1, 2, 3, 4], dtype='int32'))

        f = pfunc([In(a, strict=True)], [out])
        try:
            # fails, f expects float64
            f(numpy.array([1, 2, 3, 4], dtype='int32'))
        except TypeError:
            pass
Project: Theano-Deep-learning | Author: GeekLiB
def test_param_mutable(self):
        a = tensor.dvector()
        a_out = a * 2  # assuming the op which makes this "in place" triggers

        # using mutable=True will let fip change the value in aval
        fip = pfunc([In(a, mutable=True)], [a_out], mode='FAST_RUN')
        aval = numpy.random.rand(10)
        aval2 = aval.copy()
        assert numpy.all(fip(aval) == (aval2 * 2))
        assert not numpy.all(aval == aval2)

        # using mutable=False should leave the input untouched
        f = pfunc([In(a, mutable=False)], [a_out], mode='FAST_RUN')
        aval = numpy.random.rand(10)
        aval2 = aval.copy()
        assert numpy.all(f(aval) == (aval2 * 2))
        assert numpy.all(aval == aval2)
Project: Theano-Deep-learning | Author: GeekLiB
def test_cloning_replace_not_strict_copy_inputs(self):
        # This has nothing to do with scan, but it refers to the clone
        # function that scan uses internally and that pfunc uses now and
        # that users might want to use
        x = theano.tensor.vector('x')
        y = theano.tensor.fvector('y')
        y2 = theano.tensor.dvector('y2')
        z = theano.shared(0.25)

        f1 = z * (x + y) ** 2 + 5
        f2 = theano.clone(f1,
                          replace=OrderedDict([(y, y2)]),
                          strict=False,
                          share_inputs=True)
        f2_inp = theano.gof.graph.inputs([f2])
        assert z in f2_inp
        assert x in f2_inp
        assert y2 in f2_inp
Project: Theano-Deep-learning | Author: GeekLiB
def test_cloning_replace_not_strict_not_copy_inputs(self):
        # This has nothing to do with scan, but it refers to the clone
        # function that scan uses internally and that pfunc uses now and
        # that users might want to use
        x = theano.tensor.vector('x')
        y = theano.tensor.fvector('y')
        y2 = theano.tensor.dvector('y2')
        z = theano.shared(0.25)

        f1 = z * (x + y) ** 2 + 5
        f2 = theano.clone(f1,
                          replace=[(y, y2)],
                          strict=False,
                          share_inputs=False)
        f2_inp = theano.gof.graph.inputs([f2])
        assert z not in f2_inp
        assert x not in f2_inp
        assert y2 not in f2_inp

Project: RecommendationSystem | Author: TURuibo
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Duplicate u along a new leading axis, multiply it elementwise
        # against t (axes swapped so the shapes broadcast), then restore
        # the original axis order.
        return ([u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2)
Project: RecommendationSystem | Author: TURuibo
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Broadcasted elementwise product of u with each slice of t,
        # followed by a max over the last (feature) axis.
        return T.max(([u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2), 2)
Project: RecommendationSystem | Author: TURuibo
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Tile u five times to match t's leading axis, multiply
        # elementwise, and sum over the last (feature) axis.
        return T.sum(([u, u, u, u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2), 2)
Project: RecommendationSystem | Author: TURuibo
def get_output(self, train=False):
        u = self.layers[0].get_output(train)  # user representation
        t = self.layers[1].get_output(train)  # item representations
        # Broadcasted elementwise product summed over the last (feature)
        # axis, i.e. a batched dot product of u with each slice of t.
        return T.sum(([u, u] * t.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2), 2)
Project: eqnet | Author: mast-group
def __init__(self, embedding_size: int, vocabulary_size: int, empirical_distribution, representation_size: int,
                 hyperparameters: dict, encoder_type: str, name: str = "GRUSequenceSupervisedEncoder",
                 use_centroid=False):
        self.__hyperparameters = hyperparameters
        self.__name = name
        log_init_noise = self.__hyperparameters["log_init_noise"]

        self.__memory_size = representation_size
        self.__embedding_size = embedding_size

        embeddings = np.random.randn(vocabulary_size, embedding_size) * 10 ** log_init_noise
        self.__embeddings = theano.shared(embeddings.astype(theano.config.floatX), name=name + ":embeddings")
        self.__name_bias = theano.shared(np.log(empirical_distribution).astype(theano.config.floatX),
                                         name=name + ":name_bias")

        encoder_init_state = np.random.randn(representation_size) * 10 ** log_init_noise
        self.__encoder_init_state = theano.shared(encoder_init_state.astype(theano.config.floatX),
                                                  name=name + ":encoder_init_state")

        self.__rng = RandomStreams()

        self.__input_sequence = T.ivector(name + ":input_sequence")
        self.__output_sequence = T.ivector(name + ":output_sequence")
        self.__inverted_output_sequence = self.__output_sequence[::-1]
        if encoder_type == 'gru':
            self.__encoder = GRU(self.__embeddings, representation_size, embedding_size,
                                 self.__hyperparameters, self.__rng, name=name + ":GRUSequenceEncoder",
                                 use_centroid=use_centroid)
        elif encoder_type == 'averaging_gru':
            self.__encoder = AveragingGRU(self.__embeddings, representation_size, embedding_size,
                                          self.__hyperparameters, self.__rng,
                                          name=name + ":AveragingGRUSequenceEncoder", use_centroid=use_centroid)
        else:
            raise Exception("Unrecognized encoder type `%s`, possible options `gru` and `averaging_gru`")

        self.__params = {"embeddings": self.__embeddings,
                         "encoder_init_state": self.__encoder_init_state}
        self.__params.update(self.__encoder.get_params())
        self.__standalone_representation = T.dvector(self.__name + ":representation_input")
Project: Theano-Deep-learning | Author: GeekLiB
def test_pydotprint_cond_highlight():
    """
    This is a REALLY PARTIAL TEST.

    It was written to help debug stuff.
    """

    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    f = theano.function([x], x * 2)
    f([1, 2, 3, 4])

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(orig_handler)
    theano.theano_logger.addHandler(new_handler)
    try:
        theano.printing.pydotprint(f, cond_highlight=True,
                                   print_output_file=False)
    finally:
        theano.theano_logger.addHandler(orig_handler)
        theano.theano_logger.removeHandler(new_handler)

    assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'
            ' is no IfElse node in the graph\n')
Project: Theano-Deep-learning | Author: GeekLiB
def test_pydotprint_return_image():
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    ret = theano.printing.pydotprint(x * 2, return_image=True)
    assert isinstance(ret, (str, bytes))
Project: Theano-Deep-learning | Author: GeekLiB
def test_pydotprint_variables():
    """
    This is a REALLY PARTIAL TEST.

    It was written to help debug stuff.

    It makes sure the code runs.
    """

    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()

    s = StringIO()
    new_handler = logging.StreamHandler(s)
    new_handler.setLevel(logging.DEBUG)
    orig_handler = theano.logging_default_handler

    theano.theano_logger.removeHandler(orig_handler)
    theano.theano_logger.addHandler(new_handler)
    try:
        theano.printing.pydotprint(x * 2)
        if not theano.printing.pd.__name__ == "pydot_ng":
            theano.printing.pydotprint_variables(x * 2)
    finally:
        theano.theano_logger.addHandler(orig_handler)
        theano.theano_logger.removeHandler(new_handler)
Project: Theano-Deep-learning | Author: GeekLiB
def test_fail(self):
        """
        Test that conv2d fails for dimensions other than 2 or 3.
        """
        self.assertRaises(Exception, conv.conv2d, T.dtensor4(), T.dtensor3())
        self.assertRaises(Exception, conv.conv2d, T.dtensor3(), T.dvector())
Project: Theano-Deep-learning | Author: GeekLiB
def test_inplace_norun(self):
        rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector,
                            inplace=True)
        assert rf.inplace
        assert getattr(rf, 'destroy_map', {}) != {}
Project: Theano-Deep-learning | Author: GeekLiB
def test_inplace_optimization(self):
        """Test that FAST_RUN includes the random_make_inplace optimization"""
        rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
        rng_R = random_state_type()

        # If calling RandomFunction directly, all args have to be specified,
        # because shape will have to be moved to the end
        post_r2, out2 = rf2(rng_R, (4,), 0., 1.)

        f = compile.function(
                [compile.In(rng_R,
                    value=numpy.random.RandomState(utt.fetch_seed()),
                    update=post_r2,
                    mutable=True)],
                out2,
                mode='FAST_RUN')  # DEBUG_MODE can't pass the id-based
                                  # test below

        # test that the RandomState object stays the same from function call to
        # function call, but that the values returned change from call to call.

        id0 = id(f[rng_R])
        val0 = f()
        assert id0 == id(f[rng_R])
        val1 = f()
        assert id0 == id(f[rng_R])

        assert not numpy.allclose(val0, val1)
Project: Theano-Deep-learning | Author: GeekLiB
def test_const_type_in_mul_canonizer():
    input = dmatrix()
    w = dmatrix()
    visb = dvector()
    hidb = dvector()
    betas = dvector()
    a = dvector()

    def sigm(x):
        return 1. / (1 + tensor.exp(-x))

    hid = sigm((tensor.dot(w, input) + hidb) * betas)

    vis_gauss1 = (tensor.dot(w.T, hid) + visb) * betas / (2 * a * a)
    vis_gauss2 = (tensor.dot(w.T, hid) + visb) * betas / (2. * a * a)

    f1 = function([input, w, visb, hidb, betas, a], vis_gauss1)
    f2 = function([input, w, visb, hidb, betas, a], vis_gauss2)

    ival = numpy.random.rand(5, 5)
    wval = numpy.random.rand(5, 5)
    visbval = numpy.random.rand(5)
    hidbval = numpy.random.rand(5)
    betaval = numpy.random.rand(5)
    aval = numpy.random.rand(5)

    utt.assert_allclose(
        f2(ival, wval, visbval, hidbval, betaval, aval),
        f1(ival, wval, visbval, hidbval, betaval, aval))
Project: Theano-Deep-learning | Author: GeekLiB
def test_pickle_big_fusion(self):
        """In the past, pickle of Composite generated in tha case
        crashed with max recusion limit. So we where not able to
        generate C code in that case.

        """
        if not theano.config.cxx:
            raise SkipTest("no c compiler, so can't use big elemwise!")
        factors = []
        sd = tensor.dscalar()
        means = tensor.dvector()

        cst_05 = theano.tensor.constant(.5)
        cst_m05 = theano.tensor.constant(-.5)
        cst_2 = theano.tensor.constant(2)
        cst_m2 = theano.tensor.constant(-2)
        ones = theano.tensor.constant(numpy.ones(10))
        n = 85
        if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
            n = 10

        for i in xrange(n):
            f = (cst_m05 * sd ** cst_m2 * (ones - means[i]) ** cst_2 +
                 cst_05 * tensor.log(cst_05 * (sd ** cst_m2) / numpy.pi))
            factors.append(tensor.sum(f))

        logp = tensor.add(*factors)

        vars = [sd, means]
        dlogp = function(vars, [theano.grad(logp, v) for v in vars])
        dlogp(2, numpy.random.rand(n))
Project: Theano-Deep-learning | Author: GeekLiB
def test_log_add():
    m = theano.config.mode
    if m == 'FAST_COMPILE':
        m = 'FAST_RUN'
    m = compile.mode.get_mode(m)
    m = m.excluding('fusion')
    m = copy.copy(m)
    # No need to put them back as we have a new object
    m.check_isfinite = False

    # check some basic cases
    x = dvector()
    y = dvector()
    f = function([x, y], T.log(T.exp(x) + T.exp(y)), mode=m)

    f([10000], [10000])  # causes overflow if handled incorrectly
    assert numpy.isfinite(f([10000], [10000]))
    utt.assert_allclose(f([10000], [10000]), 10000 + numpy.log1p(1))

    # test that it gives the same result when it doesn't overflow
    f([10], [10])  # doesn't cause overflow
    utt.assert_allclose(f([10], [10]), 10 + numpy.log1p(1))

    # test that it also works with more than two args (this currently fails)
    x = dvector()
    y = dvector()
    f = function([x, y], T.log(T.exp(x) + T.exp(y) + T.exp(x - y) + T.exp(
        x + y)), mode=m)

    try:
        f([10000], [10000])  # causes overflow if handled incorrectly
        utt.assert_allclose(f([10000], [10000]), 20000)
    except utt.WrongValue:
        raise SkipTest("log(add(exp)) is not stabilized when adding "
                       "more than 2 elements, see #623")

    # TODO: test that the optimization works in the presence of broadcasting.

    # TODO: (write and) test that the optimization works with Sum in addition to working with Add.
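
The numerical issue under test, restated in plain NumPy (a sketch): the naive form overflows for large inputs, while the factored log-sum-exp form that the optimization targets stays finite.

import numpy as np

a, b = 10000., 10000.
naive = np.log(np.exp(a) + np.exp(b))               # inf: exp(10000) overflows float64
stable = max(a, b) + np.log1p(np.exp(-abs(a - b)))  # 10000 + log1p(1), about 10000.693
print(naive, stable)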
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_mul_switch_sink(self):
        c = T.dscalar()
        idx = 0
        for condition in [(T.dmatrix('cond'), self.condm),
                          (T.dvector('cond'), self.condv),
                          (T.dscalar('cond'), self.conds)]:
            for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv),
                      (T.dscalar('x'), self.xs)]:
                y = T.mul(T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
                          T.switch(condition[0] > 0,
                                   1. * x[0], T.log(c) * x[0]))
                f = theano.function([condition[0], x[0], c],
                                    [y], mode=self.mode)
                if type(condition[1]) is list:
                    for i in xrange(len(condition[1])):
                        res = f(condition[1][i], x[1], -1)
                        assert (res == numpy.asarray(
                            self.resm[idx][i])).sum() == self.resm[idx][i].size
                else:
                    res = f(condition[1], x[1], -1)
                    assert (res == numpy.asarray(self.
                        resm[idx])).sum() == self.resm[idx].size
                idx += 1

        # This case caused a missed optimization in the past.
        x = T.dscalar('x')
        y = T.switch(x < 7, x, T.sqrt(x - 7))
        f = theano.function([x], T.grad(y, x), self.mode)
        assert f(5) == 1, f(5)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_upcast_elemwise_constant_inputs():
    s = dvector("s")
    x = tensor.sum(tensor.log(10 ** s))
    f = function([s], [tensor.grad(x, s)])
    f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12])

    # This test a corner where the optimization should not be applied.
    old = theano.config.floatX
    theano.config.floatX = 'float32'
    try:
        v = lvector()
        function([v], theano.tensor.basic.true_div(v, 2))
    finally:
        theano.config.floatX = old
Project: Theano-Deep-learning | Author: GeekLiB
def test3(self):
        a = tensor.dvector()
        w2 = sort(a)
        f = theano.function([a], w2)
        gv = f(self.v_val)
        gt = np.sort(self.v_val)
        assert np.allclose(gv, gt)
Project: Theano-Deep-learning | Author: GeekLiB
def test0(self):
        self.assertTrue(_is_real_matrix(T.DimShuffle([False, False],
                                                     [1, 0])(T.matrix())))
        self.assertTrue(not _is_real_matrix(T.DimShuffle([False],
                                                         ['x', 0])
                                            (T.dvector())))
Project: Theano-Deep-learning | Author: GeekLiB
def test_normal_vector(self):
        random = RandomStreams(utt.fetch_seed())
        avg = tensor.dvector()
        std = tensor.dvector()
        out = random.normal(avg=avg, std=std)
        assert out.ndim == 1
        f = function([avg, std], out)

        avg_val = [1, 2, 3]
        std_val = [.1, .2, .3]
        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(avg_val, std_val)
        numpy_val0 = numpy_rng.normal(loc=avg_val, scale=std_val)
        assert numpy.allclose(val0, numpy_val0)

        # arguments of size (2,)
        val1 = f(avg_val[:-1], std_val[:-1])
        numpy_val1 = numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1])
        assert numpy.allclose(val1, numpy_val1)

        # Specifying the size explicitly
        g = function([avg, std], random.normal(avg=avg, std=std, size=(3,)))
        val2 = g(avg_val, std_val)
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy_rng.normal(loc=avg_val, scale=std_val, size=(3,))
        assert numpy.allclose(val2, numpy_val2)
        self.assertRaises(ValueError, g, avg_val[:-1], std_val[:-1])
Project: Theano-Deep-learning | Author: GeekLiB
def test_multiple_inplace(self):
        x = tensor.dmatrix('x')
        y = tensor.dvector('y')
        z = tensor.dvector('z')
        f = theano.function([x, y, z],
                            [tensor.dot(y, x), tensor.dot(z,x)],
                            mode=mode_blas_opt)
        vx = numpy.random.rand(3, 3)
        vy = numpy.random.rand(3)
        vz = numpy.random.rand(3)
        out = f(vx, vy, vz)
        assert numpy.allclose(out[0], numpy.dot(vy, vx))
        assert numpy.allclose(out[1], numpy.dot(vz, vx))
        assert len([n for n in f.maker.fgraph.apply_nodes
                    if isinstance(n.op, tensor.AllocEmpty)]) == 2
Project: Theano-Deep-learning | Author: GeekLiB
def test_reshape_long_in_shape(self):
        v = dvector('v')
        r = v.reshape((v.shape[0], L(1)))
        print(r.eval({v: numpy.arange(5.)}))
        assert numpy.allclose(r.eval({v: numpy.arange(5.)}).T,
                              numpy.arange(5.))
Project: Theano-Deep-learning | Author: GeekLiB
def test_smallest():
    x = dvector()
    y = dvector()
    z = dvector()
    f1 = inplace_func([x], smallest(x))
    assert numpy.all([1, 2, 3] == f1([1, 2, 3]))
    f3 = inplace_func([x, y, z], smallest(x, y, z))
    assert numpy.all([1, 2, 3] == f3([1, 3, 9], [7, 7, 7], [8, 2, 3]))

    sx, sy = dscalar(), dscalar()

    assert -4 == inplace_func([sx, sy], smallest(sx, sy))(-4.0, -2.0)
Project: Theano-Deep-learning | Author: GeekLiB
def test_wrong_input(self):
        """
        Make sure errors are raised when image and kernel are not 4D tensors
        """
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', input=T.dmatrix())
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', filters=T.dvector())
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', input=T.dtensor3())
Project: Theano-Deep-learning | Author: GeekLiB
def test_wrong_input(self):
        """
        Make sure errors are raised when image and kernel are not 4D tensors
        """
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', input=T.dmatrix())
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', filters=T.dvector())
        self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                          'valid', input=T.dtensor3())
Project: Theano-Deep-learning | Author: GeekLiB
def test_1arg(self):
        x = dmatrix('x')

        @as_op(dmatrix, dvector)
        def cumprod(x):
            return np.cumprod(x)

        fn = function([x], cumprod(x))
        r = fn([[1.5, 5], [2, 2]])
        r0 = np.array([1.5, 7.5, 15., 30.])

        assert allclose(r, r0), (r, r0)
Project: Theano-Deep-learning | Author: GeekLiB
def test_2arg(self):
        x = dmatrix('x')
        x.tag.test_value = np.zeros((2, 2))
        y = dvector('y')
        y.tag.test_value = [0, 0]

        @as_op([dmatrix, dvector], dvector)
        def cumprod_plus(x, y):
            return np.cumprod(x) + y

        fn = function([x, y], cumprod_plus(x, y))
        r = fn([[1.5, 5], [2, 2]], [1, 100, 2, 200])
        r0 = np.array([2.5, 107.5, 17., 230.])

        assert allclose(r, r0), (r, r0)
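
The as_op decorator used in these two tests wraps an ordinary NumPy function as a Theano Op; a minimal sketch outside the test harness (the clipping function is illustrative):

import numpy as np
import theano
import theano.tensor as T
from theano.compile.ops import as_op

@as_op(T.dvector, T.dvector)
def numpy_clip01(x):
    # Runs as plain NumPy code when the compiled function is called.
    return np.clip(x, 0., 1.)

v = T.dvector('v')
f = theano.function([v], numpy_clip01(v))
print(f([-1., .5, 2.]))  # [ 0.   0.5  1. ]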
Project: Theano-Deep-learning | Author: GeekLiB
def __init__(self,
                 input=tensor.dvector('input'),
                 target=tensor.dvector('target'),
                 n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
        super(NNet, self).__init__(**kw)

        self.input = input
        self.target = target
        self.lr = shared(lr, 'learning_rate')
        self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
        self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
        # print self.lr.type

        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        self.cost = tensor.sum((self.output - self.target)**2)

        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates)

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
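
A usage sketch for the NNet above (shapes and hyperparameters are illustrative): each sgd_step call performs one gradient update and returns the current output and cost.

import numpy

net = NNet(n_input=3, n_hidden=4, n_output=1, lr=1e-2)
x_val = numpy.random.rand(3)
t_val = numpy.random.rand(1)
for _ in range(10):
    output, cost = net.sgd_step(x_val, t_val)
print(net.compute_output(x_val))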
Project: Theano-Deep-learning | Author: GeekLiB
def test_input_aliasing_affecting_inplace_operations(self):

        # Note: to trigger this bug with theano rev 4586:2bc6fc7f218b,
        #       you need to make the inputs mutable (so that inplace
        #       operations are used) and to break the elemwise composition
        #       with some non-elemwise op (here dot)
        x = theano.tensor.dvector()
        y = theano.tensor.dvector()
        m1 = theano.tensor.dmatrix()
        m2 = theano.tensor.dmatrix()
        f = theano.function([theano.In(x, mutable=True),
                             theano.In(y, mutable=True),
                             theano.In(m1, mutable=True),
                             theano.In(m2, mutable=True)],
                            theano.dot((x * 2), m1) + theano.dot((y * 3), m2))
        # Test 1. If the same variable is given twice

        # Compute bogus values
        v = numpy.asarray([1, 2, 3, 4, 5], dtype='float64')
        m = numpy.asarray([[1, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0],
                           [0, 0, 1, 0, 0],
                           [0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 1]], dtype='float64')
        bogus_vals = f(v, v, m, m)
        # Since we used inplace operation v and m may be corrupted
        # so we need to recreate them

        v = numpy.asarray([1, 2, 3, 4, 5], dtype='float64')
        m = numpy.asarray([[1, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0],
                           [0, 0, 1, 0, 0],
                           [0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 1]], dtype='float64')
        m_copy = m.copy()
        v_copy = v.copy()
        vals = f(v, v_copy, m, m_copy)

        assert numpy.allclose(vals, bogus_vals)
Project: Theano-Deep-learning | Author: GeekLiB
def test_no_leak_many_call_lazy():
        # Verify no memory leaks when calling a function a lot of times

        # This isn't really a unit test; you have to run it and watch
        # `top` to see if there's a leak

        def build_graph(x, depth=5):
            z = x
            for d in range(depth):
                z = ifelse(z.mean() > 0.5, -z, z)
            return z

        def time_linker(name, linker):
            steps_a = 10
            x = tensor.dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a,
                           mode=Mode(optimizer=None,
                                     linker=linker()))
            inp = numpy.random.rand(1000000)
            for i in xrange(100):
                f_a(inp)
            if 0:  # this doesn't seem to work, prints 0 for everything
                import resource
                pre = resource.getrusage(resource.RUSAGE_SELF)
                post = resource.getrusage(resource.RUSAGE_SELF)
                print(pre.ru_ixrss, post.ru_ixrss)
                print(pre.ru_idrss, post.ru_idrss)
                print(pre.ru_maxrss, post.ru_maxrss)
        print(1)
        time_linker('vmLinker_C',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
        print(2)
        time_linker('vmLinker',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
Project: Theano-Deep-learning | Author: GeekLiB
def test_using_taps_sequence(self):
        # this test refers to a bug reported by Nicolas
        # Boulanger-Lewandowski June 6th
        x = theano.tensor.dvector()
        y, updates = theano.scan(lambda x: [x],
                                 sequences=dict(input=x, taps=[-1]),
                                 outputs_info=[None])
        inp = numpy.arange(5).astype('float64')
        rval = theano.function([x], y, updates=updates)(inp)
        assert numpy.all(rval == inp[:-1])
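
In plain terms (a sketch reusing the test's input): with taps=[-1] on the sequence, step t of the scan sees x[t - 1], so the output is the input without its last element.

import numpy

inp = numpy.arange(5).astype('float64')
print(inp[:-1])  # [ 0.  1.  2.  3.], what the compiled scan returns above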