Python theano module: compile() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.compile().
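
For orientation, here is a minimal sketch of the pattern nearly all of the examples below share: fetch a compilation Mode from theano.compile, adjust its optimizer with including()/excluding(), and pass it to theano.function. This is an illustrative sketch (the variable names are ours, and it assumes a working Theano installation), not code from any of the listed projects:

import theano
import theano.tensor as T

# including()/excluding() return new Mode objects;
# the default mode itself is left untouched.
mode = theano.compile.get_default_mode().including(
    'canonicalize').excluding('fusion')

x = T.vector('x')
f = theano.function([x], x * 2 + x, mode=mode)

# Inspect the optimized graph, as the tests below repeatedly do.
print(f.maker.fgraph.toposort())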

Project: Theano-Deep-learning    Author: GeekLiB
def test_local_merge_abs():
    x, y, z = T.matrices('xyz')
    x_val = numpy.random.rand(5, 5).astype(config.floatX)
    y_val = numpy.random.rand(5, 5).astype(config.floatX)
    z_val = numpy.random.rand(5, 5).astype(config.floatX)
    mode = theano.config.mode
    if mode == "FAST_COMPILE":
        mode = "FAST_RUN"
    mode = theano.compile.mode.get_mode(mode).excluding(
                                                    "local_elemwise_fusion")

    f = theano.function([y, z], (abs(y * z * -2)), mode=mode)
    f(y_val, z_val)
    assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op, scal.Abs)
    assert len(f.maker.fgraph.toposort()) == 2

    f = theano.function([x, y], abs(x / y), mode=mode)
    f(x_val, y_val)
    assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op, scal.Abs)
    assert len(f.maker.fgraph.toposort()) == 2
Project: Theano-Deep-learning    Author: GeekLiB
def test_gpu_fusion(self):
        shp = (5, 5)
        # We need the optimisation enabled; DebugMode does this.
        if theano.config.mode == "FAST_COMPILE":
            mode = theano.compile.mode.get_mode("FAST_RUN").including(
                'local_elemwise_fusion',  'composite_elemwise_fusion',
                'canonicalize', 'gpu')
        else:
            mode = theano.compile.mode.get_default_mode().including(
                'local_elemwise_fusion',  'composite_elemwise_fusion',
                'canonicalize', 'gpu')
        import theano.sandbox.cuda as cuda
        if not cuda.cuda_available:
            raise SkipTest("cuda not available")

        self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)
Project: Theano-Deep-learning    Author: GeekLiB
def test_gpu_fusion_Xd(self):
        # We need the optimisation enabled; DebugMode does this.
        if theano.config.mode == "FAST_COMPILE":
            mode = theano.compile.mode.get_mode("FAST_RUN").including(
                'local_elemwise_fusion',  'composite_elemwise_fusion',
                'canonicalize', 'gpu')
        else:
            mode = theano.compile.mode.get_default_mode().including(
                'local_elemwise_fusion',  'composite_elemwise_fusion',
                'canonicalize', 'gpu')
        import theano.sandbox.cuda as cuda
        if not cuda.cuda_available:
            raise SkipTest("cuda not available")
        sizes = cuda.opt.get_device_type_sizes()
        if sizes['int_size'] == 4:
            shp = (5, 5, 5, 5)
        else:
            shp = (5, 5, 5)
        self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)
Project: Theano-Deep-learning    Author: GeekLiB
def test_inequality_with_self(self):
        x = T.scalar('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')

        f = theano.function([x], T.lt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.le(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.gt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.minimum(x, x), mode=mode)
        self.assert_identity(f)

        f = theano.function([x], T.maximum(x, x), mode=mode)
        self.assert_identity(f)
Project: Theano-Deep-learning    Author: GeekLiB
def test_and(self):
        mode = theano.compile.get_default_mode().including('canonicalize')

        x = T.scalar('x', dtype='int8')

        for zero, one in [(numpy.int8(0), numpy.int8(1)), (0, 1)]:
            f = theano.function([x], T.and_(x, zero), mode=mode)
            self.assert_eqs_const(f, 0)

            f = theano.function([x], T.and_(zero, x), mode=mode)
            self.assert_eqs_const(f, 0)

            f = theano.function([x], T.and_(x, one), mode=mode)
            if f.outputs[0].variable.dtype == x.dtype:
                self.assert_identity(f)

            f = theano.function([x], T.and_(one, x), mode=mode)
            if f.outputs[0].variable.dtype == x.dtype:
                self.assert_identity(f)
Project: Theano-Deep-learning    Author: GeekLiB
def test0(self):
        x = shared(self.rng.randn(3, 7))
        a = tensor.alloc(x, 6, 7)

        # It is a bad idea to have tensor.alloc return x directly,
        # because the shape mismatch cannot be caught.
        assert a.owner and isinstance(a.owner.op, tensor.Alloc)

        f = function([], a, mode=mode_opt)
        # The optimization should then be applied, and remove Alloc
        assert ([node.op for node in f.maker.fgraph.toposort()]
                == [deep_copy_op])

        # In DebugMode, the shape mismatch should be detected
        if isinstance(mode_opt, compile.DebugMode):
            self.assertRaises(ValueError, f)

        # No need to check_stack_trace as the optimization
        # local_canonicalize_alloc only removes nodes.
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_remove_useless_assert2(self):
        # remove assert conditions that are always true
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode)

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 1),
                            mode=mode)
        assert f(1, 1) == 1
        assert f(5, 1) == 5
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert len(topo[0].inputs) == 2
        assert topo[1].op == deep_copy_op
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_remove_useless_assert3(self):
        # don't remove assert conditions that are always false
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode)

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 0),
                            mode=mode)
        self.assertRaises(AssertionError, f, 1, 0)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert len(topo[0].inputs) == 3
        assert topo[1].op == deep_copy_op
Project: Theano-Deep-learning    Author: GeekLiB
def test_constant_folding():
    """ Test that constant folding get registered at fast_compile

    An error removed that registration during the registration.
    """
    x = tensor.dvector()
    mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2

    # Test that we do not crash when constant folding elemwise scalars,
    # as they should not generate C code.

    x = tensor.constant(3)
    assert x.ndim == 0
    mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
    f = theano.function([], [x * 2, x + x], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert all([isinstance(n.op, DeepCopyOp) for n in topo])
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        # condition values
        self.condm = numpy.asarray([[0.1, 0, 1, -1],
                                    [0., 0., 0., 0.],
                                    [1, 1, 1, 1]])
        self.condv = numpy.asarray([0.1, 0, 1, -1])
        self.conds = [0.1, 0, 1, -1]

        # x values
        self.xm = numpy.ones((3, 4))
        self.xv = numpy.ones((4,))
        self.xs = 1.

        # expected results
        self.resm = (
            [numpy.asarray([[1, 0, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1]])] * 3 +
            [numpy.asarray([[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]])] +
            2 * [numpy.asarray([[1, 0, 1, 0]])] +
            [[numpy.ones((3, 4)), numpy.zeros((3, 4)),
              numpy.ones((3, 4)), numpy.zeros((3, 4))]] +
            [[numpy.ones((4,)), numpy.zeros((4,)),
              numpy.ones((4,)), numpy.zeros((4,))]] +
            [[numpy.asarray(1.0), numpy.asarray(0.0),
              numpy.asarray(1.0), numpy.asarray(0.0)]])

        self.mode = theano.compile.mode.get_default_mode().including(
                'canonicalize', 'fast_run').excluding('gpu', 'fusion')
        self.mode = copy.copy(self.mode)
        self.mode.check_isfinite = False
Project: Theano-Deep-learning    Author: GeekLiB
def speed_local_log_erfc(self):

    # numpy.random.rand requires an integer argument
    val = numpy.random.rand(int(1e6))
        x = T.vector()
        mode = theano.compile.mode.get_mode("FAST_RUN")
    f1 = theano.function([x], T.log(T.erfc(x)),
                         mode=mode.excluding("local_log_erfc"))
        f2 = theano.function([x], T.log(T.erfc(x)), mode=mode)
        print(f1.maker.fgraph.toposort())
        print(f2.maker.fgraph.toposort())
        t0 = time.time()
        f1(val)
        t1 = time.time()
        f2(val)
        t2 = time.time()
        print(t1 - t0, t2 - t1)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_useless_split():
    x = tensor.matrix('x')
    splits = tensor.ivector('splits')
    opt = tensor.split(x, splits, n_splits=1)
    nonopt = tensor.split(x, splits, n_splits=3)

    mode = compile.get_default_mode().including("local_useless_split")
    f_opt = theano.function([x, splits], opt, mode=mode)
    f_nonopt = theano.function([x, splits], nonopt, mode=mode)

    f_opt(numpy.random.rand(4, 4).astype(config.floatX), [4])
    f_nonopt(numpy.random.rand(4, 4).astype(config.floatX), [1, 2, 1])
    graph_opt = f_opt.maker.fgraph.toposort()
    graph_nonopt = f_nonopt.maker.fgraph.toposort()

    assert isinstance(graph_opt[-1].op, DeepCopyOp)
    assert len(graph_nonopt) == 1
    assert isinstance(graph_nonopt[0].op, tensor.Split)

    assert check_stack_trace(f_opt, ops_to_check=[Assert])
    assert check_stack_trace(f_nonopt, ops_to_check='all')
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_sumsqr2dot():
    G = matrix('G')
    W = matrix('W')

    y = T.sqr(W.dimshuffle('x', 0, 1) * G.dimshuffle(0, 'x', 1)).sum(axis=(1, 2))
    MODE = theano.compile.get_default_mode().including('local_sumsqr2dot')

    f = function([W, G], y, mode=MODE)

    w_val = numpy.random.rand(4, 3).astype(config.floatX)
    g_val = numpy.random.rand(5, 3).astype(config.floatX)

    f_val = f(w_val, g_val)
    f_test = numpy.dot(numpy.square(g_val), numpy.square(w_val).sum(axis=0))

    utt.assert_allclose(f_val, f_test)
    assert any(isinstance(n.op, (tensor.basic.Dot, tensor.blas.Dot22,
                                 tensor.blas.Gemv, tensor.blas_c.CGemv))
               for n in f.maker.fgraph.toposort())
Project: Theano-Deep-learning    Author: GeekLiB
def __init__(self, name,
                 shared=tensor._shared,
                 sub=tensor.AdvancedSubtensor,
                 inc_sub=tensor.AdvancedIncSubtensor,
                 mode=None,
                 dtype=theano.config.floatX,
                 ignore_topo=DeepCopyOp):
        self.shared = shared
        self.sub = sub
        self.inc_sub = inc_sub
        if mode is None:
            mode = theano.compile.mode.get_default_mode()
        self.mode = mode
        self.dtype = dtype
        self.ignore_topo = ignore_topo
        super(TestAdvancedSubtensor, self).__init__(name)
Project: Theano-Deep-learning    Author: GeekLiB
def handle_shared_float32(tf):
    """
    Set the default shared type for float32 tensors to CudaNdarrayType.

    This function is intended to be called from use(gpu_index), not directly.

    """
    if tf:
        theano.compile.shared_constructor(float32_shared_constructor)
    else:
        theano.compile.shared_constructor(float32_shared_constructor, True)
        assert (float32_shared_constructor not in
                theano.compile.shared.constructors)

# We can't test the driver during import here, as this causes a circular
# import dependency. So we also test it in the file theano/__init__.py.
Project: Theano-Deep-learning    Author: GeekLiB
def _dnn_check_compile():
    preambule = """
#include <stdio.h>
#include <cudnn.h>
#include <cudnn_helper.h>
"""

    # No need for the context in here since we won't execute that code
    body = """
cudnnHandle_t _handle = NULL;
cudnnStatus_t err;
if ((err = cudnnCreate(&_handle)) != CUDNN_STATUS_SUCCESS) {
  fprintf(stderr, "could not create cuDNN handle: %s",
          cudnnGetErrorString(err));
  return 1;
}
"""

    params = ["-l", "cudnn", "-I" + os.path.dirname(__file__)]
    if config.dnn.include_path:
        params.append("-I" + config.dnn.include_path)
    if config.dnn.library_path:
        params.append("-L" + config.dnn.library_path)
    # Do not run the test program here. It would run on the
    # default GPU, not the one selected by the user. If mixed
    # GPUs are installed, or if the GPUs are configured in
    # exclusive mode, this causes bad detection.
    avail, out, err = GCC_compiler.try_flags(
        params, preambule=preambule, body=body,
        try_run=False, output=True)

    if not avail:
        return False, ("cannot compile with cuDNN. "
                       "We got this error:\n" + str(err))
    return True, None
Project: Theano-Deep-learning    Author: GeekLiB
def test_filter_float():
    theano.compile.shared_constructor(gpuarray_shared_constructor)
    try:
        s = theano.shared(numpy.array(0.0, dtype='float32'),
                          target=test_ctx_name)
        theano.function([], updates=[(s, 0.0)])
    finally:
        del theano.compile.sharedvalue.shared.constructors[-1]
Project: Theano-Deep-learning    Author: GeekLiB
def test_abs_mul_div(self):
        """
        test that if we have
        4 * x / abs(2*x) it get simplifier during canonicalisation.
        """

        x = T.dscalar()
        a = T.abs_(x)

        if theano.config.mode == 'FAST_COMPILE':
            mode = theano.compile.mode.get_mode('FAST_RUN').excluding(
                    "local_elemwise_fusion")
        else:
            mode = theano.compile.mode.get_default_mode().excluding(
                    "local_elemwise_fusion")

        f = theano.function([x], [(4 * x) / abs(2 * x)], mode=mode)
        print(f.maker.fgraph.toposort())
        print()
        f(.1)
        f(-1)
        # Some stabilization optimizations make the output finite instead
        # of nan. DebugMode will raise an error when it sees nan.
        if not isinstance(mode, theano.compile.debugmode.DebugMode):
            assert numpy.isfinite(f(0))

        assert len(f.maker.fgraph.toposort()) == 2
        assert f.maker.fgraph.toposort()[0].op == T.sgn

        f = theano.function([x], [(4 * x) / abs(x / 2)], mode=mode)
        print(f.maker.fgraph.toposort())
        print()
        f(.1)
        f(-1)
        # Some stabilization optimizations make the output finite instead
        # of nan. DebugMode will raise an error when it sees nan.
        if not isinstance(mode, theano.compile.debugmode.DebugMode):
            assert numpy.isfinite(f(0))

        assert len(f.maker.fgraph.toposort()) == 2
        assert f.maker.fgraph.toposort()[0].op == T.sgn
Project: Theano-Deep-learning    Author: GeekLiB
def test_elemwise_fusion_4d(self):
        shp = (3, 3, 3, 3)
        mode = copy.copy(compile.mode.get_default_mode())
        # We need the optimisation enabled, including canonicalize:
        # canonicalize is needed to merge multiplication/addition by constants.
        mode._optimizer = mode._optimizer.including(
            'local_elemwise_fusion', 'composite_elemwise_fusion',
            'canonicalize')
        self.do(mode, shared, shp)
Project: Theano-Deep-learning    Author: GeekLiB
def speed_fusion(self, shared_fn=shared, gpu=False, s=None):
        """
        param type s: a slice object
        param s: a slice to apply to the case to execute. If None, exec all case.
        """

        # shp = (3000, 3000)  # larger variant, overridden below
        shp = (1000, 1000)
        nb_repeat = 50
#        linker=gof.CLinker
#        linker=gof.OpWiseCLinker

        mode1 = copy.copy(compile.get_default_mode())
        mode1._optimizer = mode1._optimizer.including('local_elemwise_fusion')
        # TODO: CLinker is much faster... but uses too much memory.
        # Possible cause: there is no deletion of intermediate values when
        # we don't keep the fct.
        # More plausible cause: we keep a link to the output data?
        # Follow-up: CLinker does the same... so the second cause?
        mode2 = copy.copy(compile.get_default_mode())
        mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion')
        print("test with linker", str(mode1.linker))
        times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
                         assert_len_topo=False, slice=s)
        times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
                         assert_len_topo=False, slice=s)
        print("times1 with local_elemwise_fusion")
        print(times1, times1.min(), times1.max(), times1.sum())
        print("times2 without local_elemwise_fusion")
        print(times2, times2.min(), times2.max(), times2.sum())
        d = times2 / times1

        print("times2/times1")
        print(d)
        print("min", d.min(), "argmin", d.argmin(), "max", d.max(), \
            "mean", d.mean(), "std", d.std())
Project: Theano-Deep-learning    Author: GeekLiB
def test_fusion_inplace(self):
        mode = copy.copy(compile.mode.get_default_mode())
        # We need the optimisation enabled, including canonicalize:
        # canonicalize is needed to merge multiplication/addition by constants.
        mode._optimizer = mode._optimizer.including(
            'local_elemwise_fusion',  'composite_elemwise_fusion',
            'canonicalize', 'inplace')

        x, y, z = dmatrices('xyz')
        f = theano.function([x, y, z], tensor.dot(x, y) + x + y + z, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert f.maker.fgraph.toposort()[-1].op.inplace_pattern
        f(numpy.random.random((5, 5)), numpy.random.random((5, 5)),
            numpy.random.random((5, 5)))
Project: Theano-Deep-learning    Author: GeekLiB
def test_log_add():
    m = theano.config.mode
    if m == 'FAST_COMPILE':
        m = 'FAST_RUN'
    m = compile.mode.get_mode(m)
    m = m.excluding('fusion')
    m = copy.copy(m)
    # No need to put them back as we have a new object
    m.check_isfinite = False

    # check some basic cases
    x = dvector()
    y = dvector()
    f = function([x, y], T.log(T.exp(x) + T.exp(y)), mode=m)

    f([10000], [10000])  # causes overflow if handled incorrectly
    assert numpy.isfinite(f([10000], [10000]))
    utt.assert_allclose(f([10000], [10000]), 10000 + numpy.log1p(1))

    # Test that it gives the same result when it doesn't overflow.
    f([10], [10])  # doesn't cause overflow
    utt.assert_allclose(f([10], [10]), 10 + numpy.log1p(1))

    # Test that it also works with more than two args (this currently fails).
    x = dvector()
    y = dvector()
    f = function([x, y], T.log(T.exp(x) + T.exp(y) + T.exp(x - y) + T.exp(
        x + y)), mode=m)

    try:
        f([10000], [10000])  # causes overflow if handled incorrectly
        utt.assert_allclose(f([10000], [10000]), 20000)
    except utt.WrongValue:
        raise SkipTest("log(add(exp)) is not stabilized when adding "
                       "more than 2 elements, see #623")

    # TODO: test that the optimization works in the presence of broadcasting.

    # TODO: (write and) test that the optimization works with Sum in addition to working with Add.
Project: Theano-Deep-learning    Author: GeekLiB
def test_stack_trace(self):
        x, y, z = tensor.lscalars('xyz')
        v = make_vector(x, y, z)

        mode = theano.compile.mode.get_default_mode().including(
                "local_subtensor_make_vector")

        # list of subtensor cases, where local_subtensor_make_vector
        # inserts a new MakeVector node
        v_subtensors = [v[:2], v[::2], v[[0, 2]]]

        for v_subtensor in v_subtensors:
            f = function([x, y, z], v_subtensor, mode=mode)
            self.assertTrue(check_stack_trace(f, ops_to_check='all'))
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        utt.seed_rng()
        mode = theano.compile.mode.get_default_mode()
        self.mode = mode.including("local_adv_sub1_adv_inc_sub1").excluding("fusion")
        self.mode_no_assert = self.mode.including("local_remove_all_assert")
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        mode = theano.compile.mode.get_default_mode()
        self.mode = mode.including("local_incsubtensor_of_zeros",
                                   "local_setsubtensor_of_constants",
                                   "local_0_dot_x")
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_IncSubtensor_serialize():
    d = numpy.random.normal(0, 0.01, size=(100, 100))
    d = d.astype(theano.config.floatX)

    W = theano.shared(d, name='W')
    i = T.vector('i', dtype='int64')
    j = T.vector('j', dtype='int64')
    t = T.scalar('t')
    if theano.tensor.subtensor.inplace_increment:
        y = (W[i] + W[j] + W[1] + W[i, j]).sum()
    else:
        y = (W[i] + W[j] + W[1]).sum()
    cost = T.sqr(t - y)
    dW = theano.grad(cost, W)
    mode = theano.compile.mode.get_default_mode().excluding('fusion')
    mode = mode.including("local_IncSubtensor_serialize")
    f = theano.function([i, j, t], updates=[(W, W - 0.01 * dW)], mode=mode)
    topo = f.maker.fgraph.toposort()
    adds = [n for n in topo if isinstance(n.op, T.Elemwise) and
            isinstance(n.op.scalar_op, theano.scalar.Add)]
    for a in adds:
        assert not any([inp.owner and
                        isinstance(inp.owner.op,
                                   (tensor.IncSubtensor,
                                    tensor.AdvancedIncSubtensor,
                                    tensor.AdvancedIncSubtensor1))
                        for inp in a.inputs])

    # Now test that the stack trace is copied over properly,
    # if we return the gradients. We need to use the same mode as before.
    f = theano.function([i, j, t], dW, mode=mode)
    assert check_stack_trace(f, ops_to_check=[
        tensor.IncSubtensor, tensor.AdvancedIncSubtensor,
        tensor.AdvancedIncSubtensor1])
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_subtensor_of_dot():
    m1 = theano.tensor.matrix()
    m2 = theano.tensor.matrix()
    d1 = numpy.arange(6).reshape((3, 2)).astype(config.floatX)
    d2 = numpy.arange(8).reshape((2, 4)).astype(config.floatX) + 10
    mode = compile.get_default_mode().including("local_subtensor_of_dot")

    def test_equality(a, b):
        return a.shape == b.shape and numpy.allclose(a, b)

    # [cst]
    f = theano.function([m1, m2], theano.dot(m1, m2)[1], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1])
    # A DimShuffle happens in FAST_COMPILE
    assert isinstance(topo[-1].op, (T.blas_c.CGemv, T.blas.Gemv, T.DimShuffle))

    # slice
    f = theano.function([m1, m2], theano.dot(m1, m2)[1:2], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1:2])
    assert isinstance(topo[-1].op, (T.blas.Dot22))

    m1 = theano.tensor.tensor3()
    m2 = theano.tensor.tensor3()
    idx = theano.tensor.iscalar()
    d1 = numpy.arange(30).reshape(2, 5, 3).astype(config.floatX)
    d2 = numpy.arange(72).reshape(4, 3, 6).astype(config.floatX) + 100

    f = theano.function([m1, m2, idx], theano.dot(m1, m2)[idx, 1:4, :, idx:], mode=mode)
    assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1, 1:4, :, 1:])
    # Test that the stack trace is copied over properly,
    # if we return the gradients. We need to use the same mode as before.
    assert check_stack_trace(f, ops_to_check='last')

    f = theano.function([m1, m2, idx], theano.dot(m1, m2)[1:4, :, idx:, idx], mode=mode)
    assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1:4, :, 1:, 1])

    # Now test that the stack trace is copied over properly,
    # if we return the gradients. We need to use the same mode as before.
    assert check_stack_trace(f, ops_to_check='last')
Project: Theano-Deep-learning    Author: GeekLiB
def test_shape_inequality_with_self(self):
        x = T.vector('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
                                                           'local_shape_to_shape_i',
                                                           'local_track_shape_i',
                                                           'local_subtensor_make_vector')
        f = theano.function([x], T.lt(x.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.maximum(x.shape[0], 0), mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, Shape_i), topo[0].op
        x_val = numpy.ones(100, dtype=config.floatX)
        assert f(x_val) == x_val.shape[0]

        f = theano.function([x], T.maximum(0, x.shape[0]), mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, Shape_i), topo[0].op
        x_val = numpy.ones(100, dtype=config.floatX)
        assert f(x_val) == x_val.shape[0]

        f = theano.function([x], T.minimum(x.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 0)
        assert f(x_val) == 0

        f = theano.function([x], T.minimum(0, x.shape[0]), mode=mode)
        self.assert_eqs_const(f, 0)
        assert f(x_val) == 0
        f = theano.function([x], T.minimum([0, 0], x.shape[0]), mode=mode)
        # This case isn't optimized.
#        self.assert_eqs_const(f, 0)
        utt.assert_allclose(f(x_val), [0, 0])
Project: Theano-Deep-learning    Author: GeekLiB
def test_shape_add_inequality(self):
        x = T.vector('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
                                                           'local_shape_to_shape_i',
                                                           'local_track_shape_i',
                                                           'local_subtensor_make_vector')

        y = T.vector('y', dtype=config.floatX)

        f = theano.function([x, y], T.lt(x.shape[0]+y.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x, y], T.ge(x.shape[0]+y.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 1)
Project: Theano-Deep-learning    Author: GeekLiB
def test_xor(self):
        mode = theano.compile.get_default_mode().including('canonicalize')
        x = T.scalar('x', dtype='int8')

        f = theano.function([x], T.xor(x, x), mode=mode)
        self.assert_eqs_const(f, 0)
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        # The optimization requires the shape feature so we need to compile in
        # FAST_RUN mode.
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        self.mode = compile.mode.get_mode(mode)
Project: Theano-Deep-learning    Author: GeekLiB
def test_broadcasted_dims(self):
        # This tests a case that caused a crash during optimization
        shp = (1, 1, 1, 1)
        rng = numpy.random.RandomState(utt.fetch_seed())
        a = shared(rng.rand(*shp).astype(config.floatX))
        out = self.max_pool_c01b(a, 1, 1, 1)

        # max_pool_c01b uses -inf, and this would trigger a DebugMode error.
        mode = copy.copy(theano.compile.get_default_mode())
        mode.check_isfinite = False
        f = theano.function([], out, mode=mode)
        f()
Project: Theano-Deep-learning    Author: GeekLiB
def test_no_shapeopt(self):
        # Test that a basic example works even when ShapeOpt is excluded
        X = T.matrix()
        expr = X.shape[0]

        mode = theano.compile.get_default_mode().excluding('ShapeOpt')
        f = theano.function([X], expr, mode=mode)
        print(f([[1, 2], [2, 3]]))
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_remove_all_assert1(self):
        # remove assert conditions that are unknown
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode).including('local_remove_all_assert')

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y),
                            mode=mode)
        if isinstance(mode, theano.compile.debugmode.DebugMode):
            # DebugMode will run the original version with the Assert
            self.assertRaises(AssertionError, f, 1, 0)
        else:
            f(1, 0)  # Without opt, it should fail.
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1, topo
        assert topo[0].op == deep_copy_op, topo

        mode = compile.mode.get_default_mode()
        a = theano.tensor.opt.assert_op(x, T.eq(x, 0).any())
        f = theano.function([x], a, mode=mode.excluding('unsafe'))
        topo = f.maker.fgraph.toposort()
        a_op = [n for n in topo if isinstance(n.op, T.opt.Assert)]
        assert len(a_op) == 1
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_mul_specialize():
    mode = theano.config.mode
    if mode == 'FAST_COMPILE':
        mode = 'FAST_RUN'
    mode = compile.mode.get_mode(mode)
    mode = mode.excluding('fusion')

    v = T.vector()
    m = T.vector()

    f = function([v], v * 1, mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    # A bare comparison is a no-op; it must be asserted.
    assert nodes == [deep_copy_op]

    f = function([v], v * 0, mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [Shape_i(0), T.alloc]

    f = function([v], v * (-1), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [T.neg]

    f = function([v, m], v * 1 * (-m), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [T.mul]

    f = function([v, m], v * 0 * (-m), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [Shape_i(0), T.alloc]

    f = function([v, m], v * (-1) * (-m), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [T.mul]

    f = function([v, m], v * (-1) * m, mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert nodes == [T.mul]
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_useless_tile(self):
        v = T.vector()
        m = T.matrix()
        mode = None
        if theano.config.mode == "FAST_COMPILE":
            mode = "FAST_RUN"
        for var, data in [(v, [1, 2, 3]), (m, [[1, 2], [3, 4]])]:
            # When len(repeat pattern) <= var.ndim, everything is removed
            # for ndim in range(1, var.ndim):
            for ndim in range(var.ndim + 1):
                f = theano.function([var], tile(var, (1,) * ndim), mode=mode)
                topo = f.maker.fgraph.toposort()
                assert len(topo) == 1
                assert isinstance(topo[0].op, compile.DeepCopyOp)
                f(data)
                # In this case the opt only removes nodes,
                # no need to check_stack_trace
            # When len(repeat pattern) > var.ndim, only a dimshuffle should be
            # left, but there can be a DeepCopy as well
            for ndim in range(var.ndim + 1, var.ndim + 3):
                f = theano.function([var], tile(var, (1,) * ndim), mode=mode)
                topo = f.maker.fgraph.toposort()
                assert len(topo) <= 2
                assert isinstance(topo[0].op, DimShuffle)
                assert check_stack_trace(f, ops_to_check=[DimShuffle])
                f(data)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_pow_specialize_device_more_aggressive_on_cpu():
    mode = theano.config.mode
    if mode == 'FAST_COMPILE':
        mode = 'FAST_RUN'
    mode = compile.mode.get_mode(mode)
    mode = mode.excluding('fusion').excluding('gpu')

    v = T.vector()
    val = numpy.arange(10, dtype=theano.config.floatX)
    val_no0 = numpy.arange(1, 10, dtype=theano.config.floatX)
    f = function([v], v ** (15), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert len(nodes) == 1
    assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6
    assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)
    utt.assert_allclose(f(val), val ** 15)

    f = function([v], v ** (-15), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert len(nodes) == 2
    assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6
    assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)
    assert isinstance(nodes[-1].scalar_op, theano.scalar.basic.Inv)
    utt.assert_allclose(f(val_no0), val_no0 ** (-15))

    f = function([v], v ** (16), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert len(nodes) == 1
    assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4
    assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)
    utt.assert_allclose(f(val), val ** 16)

    f = function([v], v ** (-16), mode=mode)
    nodes = [node.op for node in f.maker.fgraph.toposort()]
    assert len(nodes) == 2
    assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4
    assert isinstance(nodes[0].scalar_op, theano.scalar.Composite)
    assert isinstance(nodes[-1].scalar_op, theano.scalar.basic.Inv)
    utt.assert_allclose(f(val_no0), val_no0 ** (-16))
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_useless_rebroadcast(self):
        mode = theano.compile.get_default_mode().including('canonicalize')
        v1 = T.vector()
        v2 = T.vector()
        j = T.join(0, v1, v2)
        f = theano.function([v1, v2], j, mode=mode)
        f([1, 2], [3, 4, 5])
        e = f.maker.fgraph.toposort()
        assert len([n for n in e if isinstance(n.op, T.Rebroadcast)]) == 0

        assert check_stack_trace(f, ops_to_check='all')
Project: Theano-Deep-learning    Author: GeekLiB
def test_rebroadcast_rebroadcast(self):
        mode = theano.compile.get_default_mode().including('canonicalize')
        m = T.matrix()
        s = T.addbroadcast(m, 0, 1)
        v = T.unbroadcast(s, 1)
        f = theano.function([m], v, mode=mode)
        f([[76]])
        e = f.maker.fgraph.toposort()
        rebroadcast_nodes = [n for n in e if isinstance(n.op, T.Rebroadcast)]
        assert len(rebroadcast_nodes) == 1
        assert rebroadcast_nodes[0].op.axis == {0: True}
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.mode = theano.compile.get_default_mode().including(
            'canonicalize', 'local_fill_to_alloc')
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        mode = theano.compile.get_default_mode()
        self.mode = mode.including('local_func_inv')
Project: Theano-Deep-learning    Author: GeekLiB
def test_constant_get_stabilized():
    """
    Currently, Theano enables the constant_folding optimization before the
    stabilization optimizations. This causes some stabilization optimizations
    not to be applied, and thus causes inf values to appear when they should
    not.

    .. note:: we can't simply move the constant_folding optimization to
        specialize, as this breaks other optimizations! We would need to
        partially duplicate some canonicalize optimizations in specialize to
        fix this issue.
    """
    x2 = T.scalar()
    y2 = T.log(1 + T.exp(x2))
    mode = theano.compile.get_default_mode()
    mode.check_isfinite = False
    f2 = theano.function([x2], y2, mode=mode)
    try:
        assert len(f2.maker.fgraph.toposort()) == 1
        assert f2.maker.fgraph.toposort()[0].op == \
                theano.tensor.nnet.sigm.softplus
        assert f2(800) == 800

        x = T.as_tensor_variable(800)
        y = T.log(1 + T.exp(x))
        f = theano.function([], y, mode=mode)
        assert len(f.maker.fgraph.toposort()) == 0
        assert numpy.isinf(f())

        # When this error is fixed, the following line should be ok.
        assert f() == 800, f()

    except AssertionError:
        raise SkipTest('Theano optimizes constant before stabilization. '
                       'This breaks stabilization optimization in some '
                       'cases. See #504.')
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.mode = theano.compile.mode.get_default_mode().including(
                'canonicalize', 'fast_run').excluding('gpu', 'fusion')
        self.mode._optimizer.position_cutoff = 1.50001
        if theano.config.cxx == '' and not theano.scalar.basic_scipy.imported_scipy_special:
            raise SkipTest("erf need a c++ compiler or scipy")
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.mode = theano.compile.get_default_mode().including('canonicalize',
                                                                'specialize')
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.mode = theano.compile.get_default_mode().including(
            'canonicalize',
            'specialize',
            'uncanonicalize', 'local_max_and_argmax')
Project: Theano-Deep-learning    Author: GeekLiB
def setUp(self):
        self.mode = theano.compile.get_default_mode().including('canonicalize')
Project: Theano-Deep-learning    Author: GeekLiB
def test_0(self):
        mode = theano.compile.get_default_mode().including(
            'local_useless_reshape')
        i = T.iscalar('i')
        m = theano.tensor.mgrid[0:i,]
        f = theano.function([i], m, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
Project: Theano-Deep-learning    Author: GeekLiB
def test_1(self):
        x = theano.tensor.matrix('x')
        r = x.reshape(x.shape)

        m0 = theano.compile.get_default_mode()
        m1 = m0.including('local_useless_reshape')
        f1 = theano.function([x], r, mode=m1)
        topo = f1.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)

        m2 = m1.excluding('ShapeOpt')
        f2 = theano.function([x], r, mode=m2)
        topo = f2.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
Project: Theano-Deep-learning    Author: GeekLiB
def test_2(self):
        x = theano.tensor.matrix('x')
        r = x.reshape([Shape_i(i)(x) for i in range(x.ndim)])

        m0 = theano.compile.get_default_mode()
        m1 = m0.including('local_useless_reshape')
        f1 = theano.function([x], r, mode=m1)
        topo = f1.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)

        m2 = m1.excluding('ShapeOpt')
        f2 = theano.function([x], r, mode=m2)
        topo = f2.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_reshape_lift():
    x = tensor.tensor4()
    out = T.exp(x).reshape([x.size])
    assert out.ndim == 1
    mode = compile.mode.get_default_mode()
    mode = mode.including('local_reshape_lift')
    f = theano.function([x], out, mode=mode)
    f(numpy.random.rand(5, 4, 3, 2).astype(config.floatX))
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, tensor.Reshape)
    assert isinstance(topo[-1].op, tensor.Elemwise)