Python theano.tensor module: dscalar() code examples

The following 20 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.dscalar().
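
Before the project examples, here is a minimal sketch of the pattern they all share (assuming a working Theano/NumPy install): dscalar() declares a symbolic 0-d float64 variable, and theano.function compiles a graph built from it into a callable.

import theano
import theano.tensor as T

# Declare a symbolic float64 scalar; the name argument is optional.
x = T.dscalar('x')
y = x ** 2 + 1.0

# Compile the symbolic graph into a callable function.
f = theano.function([x], y)
assert f(3.0) == 10.0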

Project: sesame-paste-noodle    Author: aissehust
def test_Relu():
    unit = Relu()

    assert len(unit.getpara()) == 0

    x = T.dscalar()
    y = unit.forward((x,))
    f = theano.function(inputs=[x,],
                        outputs=y,
                        allow_input_downcast=True)
    assert abs(f(1.0)[0] - 1.0) < 0.01
    assert abs(f(-1.0)[0]) < 0.01

    inputsize = [128,2,2,2]
    outputsize = unit.forwardSize(inputsize)
    assert all([i == j for i, j in zip(inputsize, outputsize)])
Project: sesame-paste-noodle    Author: aissehust
def test_elu():
    unit = Elu()

    assert len(unit.getpara()) == 0

    x = T.dscalar()
    y = unit.forward((x,))
    f = theano.function(inputs=[x,],
                        outputs=y,
                        allow_input_downcast=True)
    assert abs(f(1.0)[0] - 1.0) < 0.01
    assert abs(f(-100.0)[0] + 1.0) < 0.01
    assert abs(f(-0.0001)[0]) < 0.01

    inputsize = [128,2,2,2]
    outputsize = unit.forwardSize(inputsize)
    assert all([i == j for i, j in zip(inputsize, outputsize)])
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):

        adscal = dscalar()
        bdscal = dscalar()
        adscal_val = numpy.random.rand()
        bdscal_val = numpy.random.rand() + 1
        out = theano.tensor.opt.assert_op(adscal, bdscal)
        self._compile_and_check([adscal, bdscal], [out],
                                [adscal_val, bdscal_val], Assert)

        admat = dmatrix()
        admat_val = numpy.random.rand(3, 4)
        adscal_val += 1
        out = theano.tensor.opt.assert_op(admat, adscal, bdscal)
        self._compile_and_check([admat, adscal, bdscal], [out],
                                [admat_val, adscal_val, bdscal_val], Assert)
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):
        adscal = dscalar()
        bdscal = dscalar()
        aiscal = iscalar()
        biscal = iscalar()
        ciscal = iscalar()
        discal = iscalar()
        adscal_val = numpy.random.rand()
        bdscal_val = numpy.random.rand()
        aiscal_val = numpy.random.randint(10)
        biscal_val = numpy.random.randint(10)
        ciscal_val = numpy.random.randint(10)
        discal_val = numpy.random.randint(10)
        self._compile_and_check([adscal, aiscal],
                            [MakeVector('float64')(adscal, aiscal)],
                            [adscal_val, aiscal_val], MakeVector)

        self._compile_and_check([adscal, bdscal, aiscal],
                            [MakeVector('float64')(adscal, bdscal, aiscal)],
                            [adscal_val, bdscal_val, aiscal_val], MakeVector)

        self._compile_and_check([aiscal, biscal, ciscal, discal],
                    [MakeVector('int32')(aiscal, biscal, ciscal, discal)],
                    [aiscal_val, biscal_val, ciscal_val, discal_val],
                     MakeVector)
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.iv = T.tensor(dtype='int32', broadcastable=(False,))
        self.fv = T.tensor(dtype='float32', broadcastable=(False,))
        self.fv1 = T.tensor(dtype='float32', broadcastable=(True,))
        self.dv = T.tensor(dtype='float64', broadcastable=(False,))
        self.dv1 = T.tensor(dtype='float64', broadcastable=(True,))
        self.cv = T.tensor(dtype='complex64', broadcastable=(False,))
        self.zv = T.tensor(dtype='complex128', broadcastable=(False,))

        self.fv_2 = T.tensor(dtype='float32', broadcastable=(False,))
        self.fv1_2 = T.tensor(dtype='float32', broadcastable=(True,))
        self.dv_2 = T.tensor(dtype='float64', broadcastable=(False,))
        self.dv1_2 = T.tensor(dtype='float64', broadcastable=(True,))
        self.cv_2 = T.tensor(dtype='complex64', broadcastable=(False,))
        self.zv_2 = T.tensor(dtype='complex128', broadcastable=(False,))

        self.fm = T.fmatrix()
        self.dm = T.dmatrix()
        self.cm = T.cmatrix()
        self.zm = T.zmatrix()

        self.fa = T.fscalar()
        self.da = T.dscalar()
        self.ca = T.cscalar()
        self.za = T.zscalar()
Project: sesame-paste-noodle    Author: aissehust
def test_properties():
    n = N.Network()

    va = n.modelPrefix
    vb = 'test'
    n.modelPrefix = vb
    assert n.modelPrefix != va
    assert n.modelPrefix == vb

    va = n.batchSize
    vb = 39
    n.batchSize = vb
    assert n.batchSize != va
    assert n.batchSize == vb

    va = n.saveInterval
    vb = 939
    n.saveInterval = vb
    assert n.saveInterval != va
    assert n.saveInterval == vb

    va = n.costFunction
    vb = cost.CostFunc
    n.costFunction = vb
    assert n.costFunction != va
    assert n.costFunction == vb

    va = n.inputOutputType
    vb = (T.dscalar(), T.dscalar())
    n.inputOutputType = vb
    assert all([v1.type != v2.type for v1, v2 in zip(n.inputOutputType, va)])
    assert all([v1.type == v2.type for v1, v2 in zip(n.inputOutputType, vb)])

    va = n.learningRate
    vb = 99
    n.learningRate = vb
    assert n.learningRate != va
    assert n.learningRate == vb
Project: Theano-Deep-learning    Author: GeekLiB
def test_abs_mul_div(self):
        """
        test that if we have
        4 * x / abs(2*x) it get simplifier during canonicalisation.
        """

        x = T.dscalar()
        a = T.abs_(x)

        if theano.config.mode == 'FAST_COMPILE':
            mode = theano.compile.mode.get_mode('FAST_RUN').excluding(
                    "local_elemwise_fusion")
        else:
            mode = theano.compile.mode.get_default_mode().excluding(
                    "local_elemwise_fusion")

        f = theano.function([x], [(4 * x) / abs(2 * x)], mode=mode)
        print(f.maker.fgraph.toposort())
        print()
        f(.1)
        f(-1)
        # Some stabilization optimizations make the output finite instead
        # of NaN; DebugMode raises an error when it sees NaN.
        if not isinstance(mode, theano.compile.debugmode.DebugMode):
            assert numpy.isfinite(f(0))

        assert len(f.maker.fgraph.toposort()) == 2
        assert f.maker.fgraph.toposort()[0].op == T.sgn

        f = theano.function([x], [(4 * x) / abs(x / 2)], mode=mode)
        print(f.maker.fgraph.toposort())
        print()
        f(.1)
        f(-1)
        # Some stabilization optimizations make the output finite instead
        # of NaN; DebugMode raises an error when it sees NaN.
        if not isinstance(mode, theano.compile.debugmode.DebugMode):
            assert numpy.isfinite(f(0))

        assert len(f.maker.fgraph.toposort()) == 2
        assert f.maker.fgraph.toposort()[0].op == T.sgn
Project: Theano-Deep-learning    Author: GeekLiB
def test_mixeddiv():
    """Test that int division is preserved"""
    i = iscalar()
    d = dscalar()
    assert 0 == function([i, d], d * (i // (i + 1)))(3, 1.0)
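
A short, hedged sketch of the dtype rule this test relies on (assuming a working Theano install): floor division of two integer scalars stays integer, while true division of dscalars is carried out in float64.

import theano
from theano.tensor import dscalar, iscalar

i = iscalar()
d = dscalar()

# Floor division of int32 scalars stays integer: 3 // 4 == 0.
print(theano.function([i], i // (i + 1))(3))
# True division of float64 scalars: 3.0 / 4.0 == 0.75.
print(theano.function([d], d / (d + 1))(3.0))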
Project: Theano-Deep-learning    Author: GeekLiB
def test_pickle_big_fusion(self):
        """In the past, pickle of Composite generated in tha case
        crashed with max recusion limit. So we where not able to
        generate C code in that case.

        """
        if not theano.config.cxx:
            raise SkipTest("no c compiler, so can't use big elemwise!")
        factors = []
        sd = tensor.dscalar()
        means = tensor.dvector()

        cst_05 = theano.tensor.constant(.5)
        cst_m05 = theano.tensor.constant(-.5)
        cst_2 = theano.tensor.constant(2)
        cst_m2 = theano.tensor.constant(-2)
        ones = theano.tensor.constant(numpy.ones(10))
        n = 85
        if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
            n = 10

        for i in xrange(n):
            f = (cst_m05 * sd ** cst_m2 * (ones - means[i]) ** cst_2 +
                 cst_05 * tensor.log(cst_05 * (sd ** cst_m2) / numpy.pi))
            factors.append(tensor.sum(f))

        logp = tensor.add(*factors)

        vars = [sd, means]
        dlogp = function(vars, [theano.grad(logp, v) for v in vars])
        dlogp(2, numpy.random.rand(n))
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_mul_switch_sink(self):
        c = T.dscalar()
        idx = 0
        for condition in [(T.dmatrix('cond'), self.condm),
                          (T.dvector('cond'), self.condv),
                          (T.dscalar('cond'), self.conds)]:
            for x in [(T.dmatrix('x'), self.xm), (T.dvector('x'), self.xv),
                      (T.dscalar('x'), self.xs)]:
                y = T.mul(T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
                          T.switch(condition[0] > 0,
                                   1. * x[0], T.log(c) * x[0]))
                f = theano.function([condition[0], x[0], c],
                                    [y], mode=self.mode)
                if type(condition[1]) is list:
                    for i in xrange(len(condition[1])):
                        res = f(condition[1][i], x[1], -1)
                        assert (res == numpy.asarray(
                            self.resm[idx][i])).sum() == self.resm[idx][i].size
                else:
                    res = f(condition[1], x[1], -1)
                    assert (res == numpy.asarray(
                        self.resm[idx])).sum() == self.resm[idx].size
                idx += 1

        # This case caused a missed optimization in the past.
        x = T.dscalar('x')
        y = T.switch(x < 7, x, T.sqrt(x - 7))
        f = theano.function([x], T.grad(y, x), self.mode)
        assert f(5) == 1, f(5)
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):
        z = tensor.dtensor3()
        x = tensor.dmatrix()
        y = tensor.dscalar()
        self._compile_and_check([x, y], [self.op(x, y)],
                                [numpy.random.rand(8, 5),
                                 numpy.random.rand()],
                                self.op_class)
        self._compile_and_check([z, y], [self.op(z, y)],
                                # must be square when nd>2
                                [numpy.random.rand(8, 8, 8),
                                 numpy.random.rand()],
                                self.op_class,
                                warn=False)
Project: Theano-Deep-learning    Author: GeekLiB
def test_infer_shape(self):
        x = tensor.dmatrix()
        y = tensor.dscalar()
        z = tensor.iscalar()
        for test_offset in (-5, -4, -1, 0, 1, 4, 5):
            self._compile_and_check([x, y, z], [self.op(x, y, z)],
                                    [numpy.random.rand(8, 5),
                                     numpy.random.rand(),
                                     test_offset],
                                    self.op_class)
            self._compile_and_check([x, y, z], [self.op(x, y, z)],
                                    [numpy.random.rand(5, 8),
                                     numpy.random.rand(),
                                     test_offset],
                                    self.op_class)
Project: Theano-Deep-learning    Author: GeekLiB
def test2(self):
        """Test that it works on scalar variables"""
        a = T.dscalar()
        d_a = T.DimShuffle([], [])(a)
        d_a2 = T.DimShuffle([], ['x', 'x'])(a)

        self.assertTrue(_as_scalar(a) is a)
        self.assertTrue(_as_scalar(d_a) is a)
        self.assertTrue(_as_scalar(d_a2) is a)
Project: Theano-Deep-learning    Author: GeekLiB
def test_upcasting_scalar_nogemm():
    # Test that the optimization does not crash when the scaling factor has
    # an incorrect dtype and forces upcasting of the result.
    v = T.fmatrix('v')
    w = T.fmatrix('w')
    t = T.fmatrix('t')
    alpha = T.dscalar('a')

    rval = T.dot(w, v) * alpha + t

    f = theano.function([w, v, t, alpha], rval)
    t = f.maker.fgraph.toposort()
    assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
    #theano.printing.debugprint(f, print_type=True)

    v = T.fmatrix('v')
    w = T.fmatrix('w')
    t = T.fmatrix('t')
    alpha = T.cscalar('a')

    on_opt_error = config.on_opt_error
    try:
        config.on_opt_error = 'raise'
        rval = T.dot(w, v) * alpha + t
        f = theano.function([w, v, t, alpha], rval)
    finally:
        config.on_opt_error = on_opt_error

    t = f.maker.fgraph.toposort()
    assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
    #theano.printing.debugprint(f, print_type=True)
Project: Theano-Deep-learning    Author: GeekLiB
def test_upcasting_scalar_nogemv(self):
        # Test that the optimization does not crash when the scaling factor
        # has an incorrect dtype and forces upcasting of the result.
        # This test lives in this class so that it also runs on the GPU.
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha_v = alpha_v.astype("float64")
        a_v = a_v.astype("float32")
        x_v = x_v.astype("float32")
        y_v = y_v.astype("float32")

        alpha = T.dscalar('alpha')
        a = self.shared(a_v)
        x = self.shared(x_v)
        y = self.shared(y_v)

        rval = T.dot(a, x) * alpha + y

        f = theano.function([alpha], rval, mode=self.mode)
        # This function is currently optimized so that the gemv is
        # done in place on a temporarily allocated buffer, which is
        # then scaled by alpha and added to y with a fused elemwise.
        n_gemvs = 0
        #theano.printing.debugprint(f, print_type=True)
        for node in f.maker.fgraph.toposort():
            if node.op == self.gemv_inplace:
                n_gemvs += 1
                assert node.outputs[0].dtype == 'float32'
        assert n_gemvs == 1, n_gemvs
        self.assertFunctionContains1(f, self.gemv_inplace)
        f(alpha_v)
Project: Theano-Deep-learning    Author: GeekLiB
def test_default_dtype(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.dscalar()
        high = tensor.dscalar()

        # Should not silently downcast from low and high
        out0 = random.uniform(low=low, high=high, size=(42,))
        assert out0.dtype == 'float64'
        f0 = function([low, high], out0)
        val0 = f0(-2.1, 3.1)
        assert val0.dtype == 'float64'

        # Should downcast, since asked explicitly
        out1 = random.uniform(low=low, high=high, size=(42,), dtype='float32')
        assert out1.dtype == 'float32'
        f1 = function([low, high], out1)
        val1 = f1(-1.1, 1.1)
        assert val1.dtype == 'float32'

        # Should use floatX
        lowf = tensor.fscalar()
        highf = tensor.fscalar()
        outf = random.uniform(low=lowf, high=highf, size=(42,))
        assert outf.dtype == config.floatX
        ff = function([lowf, highf], outf)
        valf = ff(numpy.float32(-0.1), numpy.float32(0.3))
        assert valf.dtype == config.floatX
Project: Theano-Deep-learning    Author: GeekLiB
def test_simple_2d(self):
        """Increments or sets part of a tensor by a scalar using full slice and
        a partial slice depending on a scalar.
        """
        a = tt.dmatrix()
        increment = tt.dscalar()
        sl1 = slice(None)
        sl2_end = tt.lscalar()
        sl2 = slice(sl2_end)

        for do_set in [False, True]:

            if do_set:
                out = tt.set_subtensor(a[sl1, sl2], increment)
            else:
                out = tt.inc_subtensor(a[sl1, sl2], increment)

            f = theano.function([a, increment, sl2_end], out)

            val_a = numpy.ones((5, 5))
            val_inc = 2.3
            val_sl2_end = 2

            result = f(val_a, val_inc, val_sl2_end)

            expected_result = numpy.copy(val_a)
            if do_set:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)
Project: Theano-Deep-learning    Author: GeekLiB
def test_divide_floats(self):
        a = T.dscalar('a')
        b = T.dscalar('b')
        c = theano.function([a, b], b / a)
        d = theano.function([a, b], b // a)
        assert c(6, 3) == 0.5
        assert d(6, 3) == 0.0
Project: deep-hashtagprediction    Author: jderiu
from collections import OrderedDict

import theano
import theano.tensor as T


def get_warp_loss_updates(cost, params):
    print "Generating WARP updates"
    updates = OrderedDict({})

    y = T.dscalar('y')

    for param in params:
        up = theano.function([y], cost)
Project: Theano-Deep-learning    Author: GeekLiB
def test_simple_3d(self):
        """Increments or sets part of a tensor by a scalar using full slice and
        a partial slice depending on a scalar.
        """
        a = tt.dtensor3()
        increment = tt.dscalar()
        sl1 = slice(None)
        sl2_end = tt.lscalar()
        sl2 = slice(sl2_end)
        sl3 = 2

        val_a = numpy.ones((5, 3, 4))
        val_inc = 2.3
        val_sl2_end = 2

        for method in [tt.set_subtensor, tt.inc_subtensor]:
            print("MethodSet", method)

            out = method(a[sl1, sl3, sl2], increment)

            f = theano.function([a, increment, sl2_end], out)

            expected_result = numpy.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if method is tt.set_subtensor:
                expected_result[:, sl3, :val_sl2_end] = val_inc
            else:
                expected_result[:, sl3, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)

            # Test when we broadcast the result
            out = method(a[sl1, sl2], increment)

            f = theano.function([a, increment, sl2_end], out)

            expected_result = numpy.copy(val_a)
            result = f(val_a, val_inc, val_sl2_end)

            if method is tt.set_subtensor:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result)