Python theano module: Mode() example source code

We extracted the following 31 code examples from open-source Python projects to illustrate how to use theano.Mode().
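For orientation, a minimal hedged sketch of what theano.Mode controls: it pairs an optimizer (which graph rewrites are applied) with a linker (how the compiled graph is executed) when compiling a function. The example below is illustrative and not taken from any of the projects:

import theano
import theano.tensor as T

x = T.dscalar('x')
# No graph optimizations, executed by the C virtual machine; other linkers
# that appear in the examples below include 'c', 'py', 'vm' and 'c|py_nogc'.
mode = theano.Mode(optimizer=None, linker='cvm')
f = theano.function([x], x ** 2, mode=mode)
assert f(3.0) == 9.0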

Project: Theano-Deep-learning    Author: GeekLiB
def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1)):
        o = self.get_output_shape(i, f, s, b, fd)
        mode = theano.Mode(optimizer=None)

        self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                     verify_grad=True, provide_shape=provide_shape,
                     border_mode=b, filter_flip=flip,
                     target_op=None, check_trace=True,
                     filter_dilation=fd, mode=mode)
        self.run_gradweight(inputs_shape=i, filters_shape=f,
                            output_shape=o, subsample=s, verify_grad=True,
                            provide_shape=provide_shape, border_mode=b,
                            filter_flip=flip, target_op=None,
                            check_trace=True, filter_dilation=fd,
                            mode=mode)
        self.run_gradinput(inputs_shape=i, filters_shape=f,
                           output_shape=o, subsample=s, verify_grad=True,
                           provide_shape=provide_shape, border_mode=b,
                           filter_flip=flip, target_op=None,
                           check_trace=True, filter_dilation=fd,
                           mode=mode)
Project: Theano-Deep-learning    Author: GeekLiB
def test_no_output_from_inplace():

    x = T.matrix()
    y = T.matrix()
    a = T.dot(x, y)
    b = T.tanh(a)

    # Ensure that the elemwise op that produces the output is inplace when
    # using a mode that does not include the optimization
    fct_no_opt = theano.function([x, y], b, mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert (hasattr(op, 'destroy_map') and 0 in op.destroy_map)

    # Ensure that the elemwise op that produces the output is not inplace when
    # using a mode that includes the optimization
    opt = AddFeatureOptimizer(NoOutputFromInplace())
    mode_opt = Mode(linker="cvm", optimizer="fast_run").register((opt, 49.9))

    fct_opt = theano.function([x, y], b, mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert (not hasattr(op, 'destroy_map') or 0 not in op.destroy_map)
Project: Theano-Deep-learning    Author: GeekLiB
def test_borrow_output(self):
        a = T.dmatrix()
        f = function([a], Out(a, borrow=False))
        o = N.ones((3, 3))
        assert o is not f(o)  # function no longer permits aliasing outputs to inputs

        f = function([a], Out(a * 4, borrow=False))
        o = N.ones((3, 3))
        four = f(o)
        assert numpy.all(four == 4)
        f(o + .1)  # should not clobber the memory used to store four
        assert numpy.all(four == 4)

        f = function([a], Out(a * 4, borrow=True), mode=theano.Mode('c|py_nogc', 'fast_run'))
        o = N.ones((3, 3))
        four = f(o)
        assert numpy.all(four == 4)
        f(o + .1)  # should clobber the memory used to store four
        if theano.config.cxx:
            assert not numpy.all(four == 4)
        else:
            # The Elemwise.perform method doesn't reuse memory,
            # as some NumPy versions don't support that correctly.
            assert numpy.all(four == 4)
Project: Theano-Deep-learning    Author: GeekLiB
def test_c_thunks():
    a = tensor.scalars('a')
    b, c = tensor.vectors('bc')
    cases = [False]
    if theano.config.cxx:
        cases.append(True)
    for c_thunks in cases:
        f = function([a, b, c], ifelse(a, a * b, b * c),
                     mode=Mode(
                         optimizer=None,
                         linker=vm.VM_Linker(c_thunks=c_thunks,
                                             use_cloop=False)))
        f(1, [2], [3, 2])
        from nose.tools import assert_raises
        assert_raises(ValueError, f, 0, [2], [3, 4])
        assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
Project: Theano-Deep-learning    Author: GeekLiB
def test_partial_function():
    import numpy as np
    from theano.tests import unittest_tools as utt

    def check_partial_function(linker_name):
        x = tensor.scalar('input')
        y = x ** 2
        f = theano.function([x], [y + 7, y - 9, y / 14.], mode=Mode(
            optimizer=None, linker=linker_name))

        assert f(3, output_subset=[0, 1, 2]) == f(3)
        assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]
        utt.assert_allclose(f(5), np.array([32., 16., 1.7857142857142858]))

    check_partial_function(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
    check_partial_function('cvm')
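For context, a hedged sketch of the partial evaluation these tests exercise: when the function is compiled with a VM linker built with allow_partial_eval=True, the output_subset keyword computes only the requested outputs. The variable names are illustrative:

import theano
import theano.tensor as tensor
from theano.gof import vm

x = tensor.scalar('x')
f = theano.function([x], [x + 1, x * 2],
                    mode=theano.Mode(optimizer=None,
                                     linker=vm.VM_Linker(allow_partial_eval=True,
                                                         use_cloop=False)))
# Only the second output is computed and returned:
print(f(3.0, output_subset=[1]))  # [array(6.0)]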
Project: Theano-Deep-learning    Author: GeekLiB
def test_partial_function_with_updates():

    def check_updates(linker_name):
        x = tensor.lscalar('input')
        y = theano.shared(numpy.asarray(1, 'int64'), name='global')
        f = theano.function([x], [x, x + 34], updates=[(y, x + 1)], mode=Mode(
            optimizer=None, linker=linker_name))
        g = theano.function([x], [x - 6], updates=[(y, y + 3)], mode=Mode(
            optimizer=None, linker=linker_name))

        assert f(3, output_subset=[]) == []
        assert y.get_value() == 4
        assert g(30, output_subset=[0]) == [24]
        assert g(40, output_subset=[]) == []
        assert y.get_value() == 10

    check_updates(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
    check_updates('cvm')
Project: Theano-Deep-learning    Author: GeekLiB
def test_vm_gc():
    """This already caused a bug in the trunk of Theano.

    The bug was introduced in the trunk on July 5th, 2012 and fixed on
    July 30th.

    """
    x = theano.tensor.vector()
    p = RunOnce()(x)
    mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True))
    f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2],
                        mode=mode)
    f([1, 2, 3])

    p = RunOnce()(x)
    pp = p + p
    f = theano.function([x], [pp + pp],
                        mode=mode)
    f([1, 2, 3])
Project: Theano-Deep-learning    Author: GeekLiB
def test_sort_schedule_fn():
    import theano
    from theano.gof.sched import sort_schedule_fn, make_depends
    x = theano.tensor.matrix('x')
    y = theano.tensor.dot(x[:5] * 2, x.T + 1).T

    def str_cmp(a, b):
        # cmp()-style comparator: lexicographic order on node names.
        # The subtraction idiom replaces Python 2's builtin cmp(),
        # which no longer exists on Python 3.
        return (str(a) > str(b)) - (str(a) < str(b))

    linker = theano.OpWiseCLinker(schedule=sort_schedule_fn(str_cmp))
    mode = theano.Mode(linker=linker)
    f = theano.function((x,), (y,), mode=mode)

    nodes = f.maker.linker.make_all()[-1]
    depends = make_depends()
    for a, b in zip(nodes[:-1], nodes[1:]):
        if not depends((b, a)):
            assert str(a) < str(b)
Project: Theano-Deep-learning    Author: GeekLiB
def test_deepcopy():
    a = cuda.fmatrix()
    a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))

    # Force the C linker to verify that C code gets generated
    mode = theano.Mode("c", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))

    # Force the Python linker, as the default Python code should work for this op
    mode = theano.Mode("py", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
Project: Theano-Deep-learning    Author: GeekLiB
def dnn_version():
    """Return the current cuDNN version we compile with.

    This returns a tuple with the header version and the library
    version we link with. For older cuDNN versions without version
    information, we return -1.

    """
    if not dnn_available():
        raise Exception(
            "We can't determine the cudnn version as it is not available",
            dnn_available.msg)

    if dnn_version.v is None:
        f = theano.function([], DnnVersion()(),
                            theano.Mode(optimizer=None),
                            profile=False)
        dnn_version.v = f()
    return dnn_version.v
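A hedged usage note: per the docstring above, on a system where cuDNN is available the helper returns a (header_version, library_version) tuple; the concrete numbers in the comment are invented for illustration:

# Hypothetical call, assuming dnn_available() is True:
v = dnn_version()
print(v)  # e.g. (5105, 5105): header and linked-library versions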
Project: recnet    Author: joergfranke
def compiling_training_function(self):
        self.train_fn = theano.function(inputs=[self.X_tv2, self.Y_tv2, self.M_tv2],
                                   outputs=[self.t_net_out, self.t_error],
                                   updates=self.updates,
                                   allow_input_downcast=True
                                   #mode='DebugMode',
                                   #profile=profile,
                                   #mode=theano.Mode(linker='c'),
                                    )
        #return train_fn
Project: Theano-Deep-learning    Author: GeekLiB
def version(raises=True):
    """
    Return the current cuDNN version we link with.

    This also does a check that the header version matches the runtime version.

    :param raises: If True, raise an exception if cuDNN is not present or
        badly installed; otherwise, return -1.

    """
    if not dnn_present():
        if raises:
            raise Exception(
                "We can't determine the cudnn version as it is not available",
                dnn_available.msg)
        else:
            return -1

    if version.v is None:
        f = theano.function([], DnnVersion()(),
                            theano.Mode(optimizer=None),
                            profile=False)
        v = f()
        if v[0] != v[1]:
            raise RuntimeError("Mixed dnn version. The header is version %s "
                               "while the library is version %s." % v)
        version.v = v[1]
    return version.v
Project: Theano-Deep-learning    Author: GeekLiB
def _make_dropout_desc(dropout, seed, context_name):
    desc, states = theano.function(
        [],
        _DropoutDescriptor(context_name)(dropout, seed, context_name),
        theano.Mode(optimizer=None),
        profile=False)()
    return desc, states
Project: Theano-Deep-learning    Author: GeekLiB
def _make_rnn_desc(hidden_size, num_layers, ddesc, rnn_mode,
                   input_mode, direction_mode, dtype, context_name):
    desc = theano.function(
        [],
        _RNNDescriptor(context_name)(hidden_size, num_layers, ddesc,
                                     input_mode, direction_mode,
                                     rnn_mode, dtype),
        theano.Mode(optimizer=None),
        profile=False)()
    return desc
Project: Theano-Deep-learning    Author: GeekLiB
def _get_param_size(desc, input_size, dtype, context_name):
    typecode = gpuarray.dtype_to_typecode(dtype)
    return theano.function(
        [],
        _RNNParamSize(context_name)(desc, input_size, typecode),
        theano.Mode(optimizer=None),
        profile=False)()
Project: Theano-Deep-learning    Author: GeekLiB
def _split_rnn_params(w, desc, layer, input_size, dtype, rnn_mode):
    typecode = gpuarray.dtype_to_typecode(dtype)
    outs = _RNNSplitParams(rnn_mode)(w, desc, layer, input_size, typecode)
    outs = [theano.Out(o, borrow=True) for o in outs]
    return theano.function(
        [], outs,
        theano.Mode(optimizer=None),
        profile=False)()
Project: Theano-Deep-learning    Author: GeekLiB
def speed(self):
        n_calls = 20000
        print("n_calls", n_calls)
        for border_mode in ['valid', 'full']:
            print()
            print(border_mode)
            for openmp in [False, True]:
                print("OpenMP", openmp)
                image_shapes = [
                    (1, 5, 6, 6),
                    (10, 5, 6, 6)
                    # (10, 10, 16, 16),
                    # (10, 10, 32, 32)]
                ]
                print("image_shape", image_shapes)
                for image_shape in image_shapes:
                    filter_shapes = [(1, 5, 4, 4), (2, 5, 4, 4), (5, 5, 4, 4)]
                    print("filter_shapes", filter_shapes)
                    for filter_shape in filter_shapes:

                        input = theano.shared(numpy.random.random(image_shape))
                        filters = theano.shared(numpy.random.random(filter_shape))

                        output = self.conv2d(
                            input, filters,
                            image_shape, filter_shape,
                            border_mode,
                            unroll_patch=True,
                            openmp=openmp)
                        mode = theano.Mode(linker=theano.gof.vm.VM_Linker(
                            allow_gc=False,
                            use_cloop=True))
                        theano_conv = theano.function([], output, mode=mode)
                        t1 = time.time()
                        theano_conv.fn(n_calls=n_calls)
                        t2 = time.time()
                        print(t2 - t1, end=' ')
                    print()
Project: Theano-Deep-learning    Author: GeekLiB
def test_including():
    mode = theano.Mode(optimizer='merge')
    mode.including('fast_compile')
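A note on this pattern: Mode.including() (like Mode.excluding(), used in test_reallocation below) returns a new Mode rather than mutating the receiver, so the result must be captured for the extra optimizations to take effect. A minimal hedged sketch:

import theano

# including() returns a new Mode; capture it to actually use it:
mode = theano.Mode(optimizer='merge').including('fast_compile')
x = theano.tensor.scalar()
f = theano.function([x], x + 1, mode=mode)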
Project: Theano-Deep-learning    Author: GeekLiB
def test_clinker_literal_cache():
    # This caused bugs in the past related to the cache.
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")

    mode = theano.Mode(linker='c')

    A = theano.tensor.matrix()
    input1 = theano.tensor.vector()

    normal_svd = numpy.array([[5.936276e+01, -4.664007e-07, -2.56265e-06],
                              [-4.664007e-07, 9.468691e-01, -3.18862e-02],
                              [-2.562651e-06, -3.188625e-02, 1.05226e+00]],
                             dtype=theano.config.floatX)

    orientationi = numpy.array([59.36276866, 1.06116353, 0.93797339],
                               dtype=theano.config.floatX)

    for out1 in [A - input1[0] * numpy.identity(3),
                 input1[0] * numpy.identity(3)]:
        benchmark = theano.function(
            inputs=[A, input1],
            outputs=[out1],
            on_unused_input='ignore',
            mode=mode)

        out1 = benchmark(normal_svd, orientationi)
Project: Theano-Deep-learning    Author: GeekLiB
def test_ifelse():
    a = T.scalar()
    b = generic()
    c = generic()

    notimpl = NotImplementedOp()
    lazys = [True]
    # We need lazy to end up being True for this test.
    if theano.config.vm.lazy in [True, None]:
        lazys = [True, None]
    cloops = [True, False]
    if theano.config.cxx == "":
        cloops = [False]
    for cloop in cloops:
        for lazy in lazys:
            linker = theano.gof.vm.VM_Linker(use_cloop=cloop, lazy=lazy)
            f = function([a, b, c], ifelse(a, notimpl(b), c),
                         mode=Mode(linker=linker, optimizer='fast_run'))

            try:
                # print "case 1"
                f(1, 'a', 'b')
                assert False
            except NotImplementedOp.E:
                pass
            # print "... passed"

            # print "case 2"
            # print f(0, 'a', 'b')
            assert f(0, 'a', 'b') == 'b'
            # print "... passed"
Project: Theano-Deep-learning    Author: GeekLiB
def more_complex_test():
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()

    x1 = T.scalar('x1')
    x2 = T.scalar('x2')
    c1 = T.scalar('c1')
    c2 = T.scalar('c2')
    t1 = ifelse(c1, x1, notimpl(x2))
    t1.name = 't1'
    t2 = t1 * 10
    t2.name = 't2'
    t3 = ifelse(c2, t2, x1 + t1)
    t3.name = 't3'
    t4 = ifelseifelseif(T.eq(x1, x2), x1, T.eq(x1, 5), x2, c2, t3, t3 + 0.5)
    t4.name = 't4'

    f = function([c1, c2, x1, x2], t4, mode=Mode(linker='vm',
                                                 optimizer='fast_run'))
    if theano.config.vm.lazy is False:
        try:
            f(1, 0, numpy.array(10, dtype=x1.dtype), 0)
            assert False
        except NotImplementedOp.E:
            pass
    else:
        print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0))
        assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5
    print('... passed')
Project: Theano-Deep-learning    Author: GeekLiB
def test_callback(self):
        a, b, c = tensor.scalars('abc')
        f = function([a, b, c], (a + b) + c,
                     mode=Mode(
                         optimizer=None,
                         linker=vm.VM_Linker(callback=self.callback)))

        f(1, 2, 3)
        assert sum(self.n_callbacks.values()) == len(f.maker.fgraph.toposort())
        f(1, 2, 3)
        assert (sum(self.n_callbacks.values()) ==
                len(f.maker.fgraph.toposort()) * 2)
Project: Theano-Deep-learning    Author: GeekLiB
def test_partial_function_with_output_keys():

    def check_partial_function_output_keys(linker_name):
        x = tensor.scalar('input')
        y = 3 * x
        f = theano.function([x], {'a': y * 5, 'b': y - 7}, mode=Mode(
            optimizer=None, linker=linker_name))

        assert f(5, output_subset=['a'])['a'] == f(5)['a']

    check_partial_function_output_keys(vm.VM_Linker(allow_partial_eval=True, use_cloop=False))
    check_partial_function_output_keys('cvm')
Project: Theano-Deep-learning    Author: GeekLiB
def test_no_leak_many_call_lazy():
        # Verify no memory leaks when calling a function a lot of times

        # This isn't really a unit test; you have to run it and watch top
        # to see if there's a leak.

        def build_graph(x, depth=5):
            z = x
            for d in range(depth):
                z = ifelse(z.mean() > 0.5, -z, z)
            return z

        def time_linker(name, linker):
            steps_a = 10
            x = tensor.dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a,
                           mode=Mode(optimizer=None,
                                     linker=linker()))
            inp = numpy.random.rand(1000000)
            for i in range(100):
                f_a(inp)
            if 0:  # this doesn't seem to work, prints 0 for everything
                import resource
                pre = resource.getrusage(resource.RUSAGE_SELF)
                post = resource.getrusage(resource.RUSAGE_SELF)
                print(pre.ru_ixrss, post.ru_ixrss)
                print(pre.ru_idrss, post.ru_idrss)
                print(pre.ru_maxrss, post.ru_maxrss)
        print(1)
        time_linker('vmLinker_C',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
        print(2)
        time_linker('vmLinker',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
Project: Theano-Deep-learning    Author: GeekLiB
def test_no_leak_many_call_nonlazy():
        # Verify no memory leaks when calling a function a lot of times

        # This isn't really a unit test; you have to run it and watch top
        # to see if there's a leak.

        def build_graph(x, depth=5):
            z = x
            for d in range(depth):
                z = tensor.sin(-z + 1)
            return z

        def time_linker(name, linker):
            steps_a = 10
            x = tensor.dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a,
                           mode=Mode(optimizer=None,
                                     linker=linker()))
            inp = numpy.random.rand(1000000)
            for i in range(500):
                f_a(inp)
        print(1)
        time_linker('vmLinker_C',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
        print(2)
        time_linker('vmLinker',
                    lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
Project: Theano-Deep-learning    Author: GeekLiB
def test_reallocation():
    x = tensor.scalar('x')
    y = tensor.scalar('y')
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    # The functionality is currently implemented for the non-lazy, non-C VMs only.
    for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
              vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
        m = theano.compile.get_mode(theano.Mode(linker=l))
        m = m.excluding('fusion', 'inplace')

        f = theano.function([x, y], z, name="test_reduce_memory",
                            mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from theano.tensor.var import TensorConstant
            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if (storage_map[i][0] and
                                storage_map[i][0] is storage_map[o][0]):
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len(set(id(v) for v in
                       storage_map.values())) < len(storage_map)
Project: Theano-Deep-learning    Author: GeekLiB
def test_eliminate_nonseqs(self):
        W = tensor.scalar('W')
        sh = theano.shared(asarrayX(2.))
        x1 = tensor.vector('x1')
        x2 = tensor.scalar('x2')

        def rec_fn(*args):
            w = args[-1]
            return [(w + 1.,  # mitsot
                     w + 2.,  # sitsot
                     w + 3.),  # nitsot
                    {sh: w + 4.}]  # shared

        [X1, X2, X3], updates = theano.scan(
            rec_fn,
            [],
            [dict(initial=x1, taps=[-1, -3]), x2, None],
            W,
            n_steps=5,
            truncate_gradient=-1,
            go_backwards=False)
        f = theano.function([W, x1, x2], [X1, X2, X3],
                            updates=updates,
                            mode=theano.Mode(linker='py'),
                            allow_input_downcast=True)
        rng = numpy.random.RandomState(utt.fetch_seed())
        v_w = asarrayX(rng.uniform())
        outs = f(v_w, [0, 0, 0], 0)
        utt.assert_allclose(outs[0], v_w + 1)
        utt.assert_allclose(outs[1], v_w + 2)
        utt.assert_allclose(outs[2], v_w + 3)
        utt.assert_allclose(sh.get_value(), v_w + 4)
Project: Theano-Deep-learning    Author: GeekLiB
def test_composite_neg_bool(self):
        # Check that taking the negation of a Boolean intermediate value
        # works correctly with Python code. It used to be an issue because
        # `-numpy.bool_(True)` is False and `-numpy.bool_(False)` is True.
        x = floats('x')
        y = - (x > 0)
        z = Composite([x], [y]).make_node(x).outputs[0]
        f = theano.function([x], z, mode=theano.Mode(linker='py'))
        for inp, out in zip([-1, 0, 1], [0, 0, -1]):
            self.assertTrue(f(inp) == out)
Project: Theano-Deep-learning    Author: GeekLiB
def test_grad_abs():
    a = theano.tensor.fscalar("a")
    b = theano.tensor.nnet.relu(a)
    c = theano.grad(b, a)
    f = theano.function([a], c, mode=theano.Mode(optimizer=None))
    # Currently Theano returns 0.5, but there is no guarantee that this
    # won't change in the future.
    ret = f(0.)
    assert ret == 0.5, ret

# Testing of Composite is done in tensor/tests/test_opt.py
# in test_fusion, TestCompositeCodegen
Project: Theano-Deep-learning    Author: GeekLiB
def test_shared_input_output():
    # Test bug reported on the mailing list by Alberto Orlandi
    # https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
    # The shared variable is both an input and an output of the function.
    inc = theano.tensor.iscalar('inc')
    state = theano.shared(0)
    state.name = 'state'
    linker = theano.gof.CLinker()
    mode = theano.Mode(linker=linker)
    f = theano.function([inc], state, updates=[(state, state + inc)],
                        mode=mode)
    g = theano.function([inc], state, updates=[(state, state + inc)])

    # Initial value
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 0, (f0, g0)

    # Increment state via f, returns the previous value.
    f2 = f(2)
    assert f2 == f0, (f2, f0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 2, (f0, g0)

    # Increment state via g, returns the previous value
    g3 = g(3)
    assert g3 == g0, (g3, g0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 5, (f0, g0)

    vstate = theano.shared(numpy.zeros(3, dtype='int32'))
    vstate.name = 'vstate'
    fv = theano.function([inc], vstate, updates=[(vstate, vstate + inc)],
                         mode=mode)
    gv = theano.function([inc], vstate, updates=[(vstate, vstate + inc)])

    # Initial value
    fv0 = fv(0)
    gv0 = gv(0)
    assert numpy.all(fv0 == 0), fv0
    assert numpy.all(gv0 == 0), gv0

    # Increment state via f, returns the previous value.
    fv2 = fv(2)
    assert numpy.all(fv2 == fv0), (fv2, fv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert numpy.all(fv0 == 2), fv0
    assert numpy.all(gv0 == 2), gv0

    # Increment state via g, returns the previous value
    gv3 = gv(3)
    assert numpy.all(gv3 == gv0), (gv3, gv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert numpy.all(fv0 == 5), fv0
    assert numpy.all(gv0 == 5), gv0
Project: Theano-Deep-learning    Author: GeekLiB
def test_speed_lazy():

    def build_graph(x, depth=5):
        z = x
        for d in range(depth):
            z = ifelse(z[0] > 0, -z, z)
        return z

    def time_linker(name, linker):
        steps_a = 10
        steps_b = 100
        x = tensor.vector()
        a = build_graph(x, steps_a)
        b = build_graph(x, steps_b)

        f_a = function([x], a,
                       mode=Mode(optimizer=None,
                                 linker=linker()))
        f_b = function([x], b,
                       mode=Mode(optimizer=None,
                                 linker=linker()))

        f_a([2.0])
        t0 = time.time()
        f_a([2.0])
        t1 = time.time()

        f_b([2.0])

        t2 = time.time()
        f_b([2.0])
        t3 = time.time()

        t_a = t1 - t0
        t_b = t3 - t2

        print("%s takes %f s/Kop" % (
            name,
            (1000 * (t_b - t_a) / (steps_b - steps_a))))

    time_linker('vmLinker', vm.VM_Linker)
    time_linker('vmLinker_nogc', lambda: vm.VM_Linker(allow_gc=False))
    if theano.config.cxx:
        time_linker('vmLinker_C', lambda: vm.VM_Linker(allow_gc=False,
                                                       use_cloop=True))