Python theano module: dot() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.dot().
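
Before the examples, here is a minimal self-contained sketch of the call being illustrated (my own illustration, not drawn from the projects below). With dense inputs theano.dot behaves like theano.tensor.dot; when either operand is sparse it defers to the sparse implementation, as several examples below show.

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
y = T.matrix('y')
z = theano.dot(x, y)            # symbolic matrix-matrix product
f = theano.function([x, y], z)  # BLAS optimizations turn this into Gemm/Dot22

a = numpy.ones((2, 3), dtype=theano.config.floatX)
b = numpy.ones((3, 4), dtype=theano.config.floatX)
assert numpy.allclose(f(a, b), numpy.dot(a, b))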

项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot_sparse_sparse(self):
        # test dot for 2 input sparse matrices
        sparse_dtype = 'float64'
        sp_mat = {'csc': sp.csc_matrix,
                  'csr': sp.csr_matrix,
                  'bsr': sp.csr_matrix}

        for sparse_format_a in ['csc', 'csr', 'bsr']:
            for sparse_format_b in ['csc', 'csr', 'bsr']:
                a = SparseType(sparse_format_a, dtype=sparse_dtype)()
                b = SparseType(sparse_format_b, dtype=sparse_dtype)()
                d = theano.dot(a, b)
                f = theano.function([a, b], theano.Out(d, borrow=True))
                topo = f.maker.fgraph.toposort()
                for M, N, K, nnz in [(4, 3, 2, 3),
                                     (40, 30, 20, 3),
                                     (40, 30, 20, 30),
                                     (400, 3000, 200, 6000),
                                     ]:
                    a_val = sp_mat[sparse_format_a](
                        random_lil((M, N), sparse_dtype, nnz))
                    b_val = sp_mat[sparse_format_b](
                        random_lil((N, K), sparse_dtype, nnz))
                    f(a_val, b_val)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_csr_dense(self):
        x = theano.sparse.csr_matrix('x')
        y = theano.tensor.matrix('y')
        v = theano.tensor.vector('v')

        for (x, y, x_v, y_v) in [(x, y, self.x_csr, self.y),
                                 (x, v, self.x_csr, self.v_100),
                                 (v, x, self.v_10, self.x_csr)]:
            f_a = theano.function([x, y], theano.sparse.dot(x, y))
            f_b = lambda x, y: x * y

            utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

            # Test infer_shape
            self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                    [x_v, y_v],
                                    (Dot, Usmm, UsmmCscDense))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_csc_dense(self):
        x = theano.sparse.csc_matrix('x')
        y = theano.tensor.matrix('y')
        v = theano.tensor.vector('v')

        for (x, y, x_v, y_v) in [(x, y, self.x_csc, self.y),
                                 (x, v, self.x_csc, self.v_100),
                                 (v, x, self.v_10, self.x_csc)]:

            f_a = theano.function([x, y], theano.sparse.dot(x, y))
            f_b = lambda x, y: x * y

            utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

            # Test infer_shape
            self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                    [x_v, y_v],
                                    (Dot, Usmm, UsmmCscDense))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_int32_dtype(self):
        # Reported on the theano-users mailing list:
        # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
        size = 9
        intX = 'int32'

        C = tensor.matrix('C', dtype=intX)
        I = tensor.matrix('I', dtype=intX)

        fI = I.flatten()
        data = tensor.ones_like(fI)
        indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

        m1 = sparse.CSR(data, fI, indptr, (8, size))
        m2 = sparse.dot(m1, C)
        y = m2.reshape(shape=(2, 4, 9), ndim=3)

        f = theano.function(inputs=[I, C], outputs=y)
        i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
        a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                          dtype=intX)
        f(i, a)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_op_ss(self):
        for format in sparse.sparse_formats:
            for dtype in sparse.all_dtypes:
                variable, data = sparse_random_inputs(format,
                                                      shape=(10, 10),
                                                      out_dtype=dtype,
                                                      n=2,
                                                      p=0.1)

                f = theano.function(variable, self.op(*variable))

                tested = f(*data)

                x, y = [m.toarray() for m in data]
                expected = numpy.dot(x, y)

                assert tested.format == format
                assert tested.dtype == expected.dtype
                tested = tested.toarray()
                utt.assert_allclose(tested, expected)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_op_sd(self):
        for format in sparse.sparse_formats:
            for dtype in sparse.all_dtypes:
                variable, data = sparse_random_inputs(format,
                                                      shape=(10, 10),
                                                      out_dtype=dtype,
                                                      n=2,
                                                      p=0.1)
                variable[1] = tensor.TensorType(dtype=dtype,
                                                broadcastable=(False, False))()
                data[1] = data[1].toarray()

                f = theano.function(variable, self.op(*variable))

                tested = f(*data)
                expected = numpy.dot(data[0].toarray(), data[1])

                assert tested.format == format
                assert tested.dtype == expected.dtype
                tested = tested.toarray()
                utt.assert_allclose(tested, expected)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def grad(self, inputs, g_outputs):
        r"""The gradient function should return

            .. math:: V\frac{\partial X^{-1}}{\partial X},

        where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
        ``inputs``. Using the `matrix cookbook
        <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
        one can deduce that the relation corresponds to

            .. math:: (X^{-1} \cdot V^{T} \cdot X^{-1})^T.

        """
        x, = inputs
        xi = self(x)
        gz, = g_outputs
        # TT.dot(gz.T,xi)
        return [-matrix_dot(xi, gz.T, xi).T]
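
As a sanity check on that identity, here is a hedged NumPy-only sketch (mine, not part of the Theano test suite) comparing the closed form against central finite differences of L(X) = sum(V * X^{-1}):

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(4, 4) + 4 * numpy.eye(4)   # keep X well-conditioned
V = rng.randn(4, 4)                      # plays the role of g_outputs

xi = numpy.linalg.inv(X)
analytic = -numpy.dot(numpy.dot(xi, V.T), xi).T   # the value grad() returns

# central finite differences of L(X) = sum(V * inv(X))
eps = 1e-6
numeric = numpy.zeros_like(X)
for i in range(4):
    for j in range(4):
        Xp, Xm = X.copy(), X.copy()
        Xp[i, j] += eps
        Xm[i, j] -= eps
        numeric[i, j] = (numpy.sum(V * numpy.linalg.inv(Xp)) -
                         numpy.sum(V * numpy.linalg.inv(Xm))) / (2 * eps)

assert numpy.allclose(analytic, numeric, atol=1e-5)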
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_gemm_nested():
    X, Y, Z = T.matrix('X'), T.matrix('Y'), T.matrix('Z')
    a, b = T.scalar('a'), T.scalar('b')
    R, S, U = T.matrix('R'), T.matrix('S'), T.matrix('U')
    c, d = T.scalar('c'), T.scalar('d')

    just_gemm([X, Y, Z, R, S, U, a, b, c, d],
              [a * Z - b * (c * T.dot(X, Y) + d * Z)],
              ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4),
                       (), (), (), ()],
              max_graphlen=1)
    just_gemm([X, Y, Z, R, S, U, a, b, c, d],
              [a * Z - b * (c * T.dot(X, Y) + d * Z + c * Z)],
              ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4),
                       (), (), (), ()],
              max_graphlen=1)
    just_gemm([X, Y, Z, R, S, U, a, b, c, d],
              [a * Z - b * (c * T.dot(X, Y) + d * Z + c * U)],
              ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4),
                       (), (), (), ()],
              max_graphlen=3)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_inplace0():
    # should fail to insert gemm_inplace because gemm_inplace would
    # create cycles
    X, Y, Z = T.matrix('X'), T.matrix('Y'), T.matrix('Z')
    a, b = T.scalar('a'), T.scalar('b')
    R, S, c = T.matrix('R'), T.matrix('S'), T.scalar('c')

    f = inplace_func([Z, b, R, S],
            [Z * (Z + b * T.dot(R, S).T)], mode='FAST_RUN')
    if gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]:
        print(pp(f.maker.fgraph.outputs[0]))
        raise Failure('gemm_inplace in graph')
    assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes]

    # gemm_inplace should be inserted here, to work in-place on Z*c
    f = inplace_func([X, Y, Z, a, b, R, S, c],
            [Z * (c * Z + a * T.dot(X, Y) + b * T.dot(R, S).T)],
            mode='FAST_RUN')
    if gemm_inplace not in [n.op for n in f.maker.fgraph.apply_nodes]:
        theano.printing.debugprint(f)
        raise Failure('no gemm_inplace in graph')
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot22():
    for dtype1 in ['float32', 'float64', 'complex64', 'complex128']:
        a = T.matrix(dtype=dtype1)
        for dtype2 in ['float32', 'float64', 'complex64', 'complex128']:
            b = T.matrix(dtype=dtype2)
            f = theano.function([a, b], T.dot(a, b), mode=mode_blas_opt)
            topo = f.maker.fgraph.toposort()
            if dtype1 == dtype2:
                assert _dot22 in [x.op for x in topo], (dtype1, dtype2)
            else:
                check = [isinstance(x.op, T.Dot) for x in topo]
                assert any(check), (dtype1, dtype2)
            rng = numpy.random.RandomState(unittest_tools.fetch_seed())

            def cmp(a_shp, b_shp):
                av = rng.uniform(size=a_shp).astype(dtype1)
                bv = rng.uniform(size=b_shp).astype(dtype2)
                f(av, bv)

            cmp((3, 4), (4, 5))
            cmp((0, 4), (4, 5))
            cmp((3, 0), (0, 5))
            cmp((3, 4), (4, 0))
            cmp((0, 4), (4, 0))
            cmp((0, 0), (0, 0))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot22scalar_cast():
    """
    Test that in `dot22_to_dot22scalar` we properly cast integers to floats.
    """
    # Note that this test was failing before d5ff6904.
    A = T.dmatrix()
    for scalar_int_type in T.int_dtypes:
        y = T.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
        assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
    A = T.fmatrix()
    for scalar_int_type in T.int_dtypes:
        y = T.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
        if scalar_int_type in ['int32', 'int64']:
            assert _dot22 in [x.op for x in f.maker.fgraph.toposort()]
        else:
            assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot_w_self():
    # This can trigger problems in the optimization because what would
    # normally be a gemm must not be used here: the output is aliased
    # to one of the inputs.

    A = shared(value=numpy.ones((2, 2)))
    B = T.matrix()

    p = T.dot(A, A) * B

    grad = T.grad(T.mean(p), A)
    f = theano.function([B], p, updates=[(A, A - grad)])

    # tests correctness in debugmode
    f(numpy.asarray([[0, 1], [2, 3]], dtype=config.floatX))


###############################################################################
# Tests for Gemv
###############################################################################
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot_vm(self):
        ''' Test vector dot matrix '''
        rng = numpy.random.RandomState(unittest_tools.fetch_seed())
        v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
        m = theano.shared(numpy.array(rng.uniform(size=(2, 3)),
             dtype='float32'))
        f = theano.function([], theano.dot(v, m), mode=mode_blas_opt)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, T.dot)
        self.assertFunctionContains1(f, Gemv(True))

        # Assert they produce the same output
        assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
        # Assert it works when m has no contiguous dimension
        m.set_value(
                m.get_value(borrow=True)[::-1, ::-1],
                borrow=True)
        assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot_mv(self):
        ''' Test matrix dot vector '''
        rng = numpy.random.RandomState(unittest_tools.fetch_seed())
        v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
        m = theano.shared(numpy.array(rng.uniform(size=(3, 2)),
                                       dtype='float32'))
        f = theano.function([], theano.dot(m, v), mode=mode_blas_opt)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, T.dot)
        self.assertFunctionContains1(f, Gemv(True))

        # Assert they produce the same output
        assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
        # Assert it works when m has no contiguous dimension
        m.set_value(
                m.get_value(borrow=True)[::-1, ::-1],
                borrow=True)
        assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_simple(self):
        alpha, beta, a, x, y = [self.shared(value)
                                for value in self.get_data()]
        desired_oy = (alpha.get_value() *
                      matrixmultiply(a.get_value(), x.get_value()) +
                      beta.get_value() * y.get_value())

        oy = alpha * T.dot(a, x) + beta * y

        oy_func = theano.function([], oy, mode=self.mode)

        topo = oy_func.maker.fgraph.toposort()
        self.assertFunctionContains1(oy_func, self.gemv)

        oy_val = oy_func()

        assert_array_almost_equal(desired_oy, oy_val)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_default_beta_y(self):

        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        a = self.shared(a_v)
        x = self.shared(x_v)

        desired_oy = matrixmultiply(a_v, x_v)

        oy = T.dot(a, x)

        oy_func = theano.function([], oy, mode=self.mode)

        self.assertFunctionContains1(oy_func, self.gemv_inplace)

        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_simple_transpose(self):
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]

        desired_oy = alpha_v * matrixmultiply(transpose(a_v),
                                              x_v) + beta_v * y_v

        oy = alpha * T.dot(a.T, x) + beta * y

        oy_func = theano.function([], oy, mode=self.mode)

        self.assertFunctionContains1(oy_func, self.gemv)

        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_x_stride_transpose(self):
        vs = self.get_data(x_stride=2)
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]

        desired_oy = alpha_v * matrixmultiply(transpose(a_v),
                                              x_v[::2]) + beta_v * y_v

        oy = alpha * T.dot(a.T, x[::2]) + beta * y

        oy_func = theano.function([], oy, mode=self.mode)

        self.assertFunctionContains1(oy_func, self.gemv)

        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_y_stride_transpose(self):
        vs = self.get_data(y_stride=2)
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]

        desired_oy = alpha_v * matrixmultiply(transpose(a_v),
                                              x_v) + beta_v * y_v[::2]

        oy = alpha * T.dot(a.T, x) + beta * y[::2]

        oy_func = theano.function([], oy, mode=self.mode)

        self.assertFunctionContains1(oy_func, self.gemv)

        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_a_strides(self):
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        a_v = a_v[::-1, ::-1]
        a.set_value(
                a.get_value(borrow=True,
                     return_internal_type=True)[::-1, ::-1],
                borrow=True)

        desired_oy = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v

        oy = alpha * T.dot(a, x) + beta * y

        oy_func = theano.function([], oy, mode=self.mode)

        self.assertFunctionContains1(oy_func, self.gemv)

        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_a_strides_transpose(self):
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        a_v = a_v[::-1, ::-1]
        a.set_value(
                a.get_value(borrow=True,
                     return_internal_type=True)[::-1, ::-1],
                borrow=True)

        desired_oy = alpha_v * matrixmultiply(transpose(a_v),
                                              x_v) + beta_v * y_v

        oy = alpha * T.dot(a.T, x) + beta * y

        oy_func = theano.function([], oy, mode=self.mode)

        self.assertFunctionContains1(oy_func, self.gemv)

        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_optimizations_vm(self):
        ''' Test vector dot matrix '''
        f = theano.function([self.x, self.A],
                theano.dot(self.x, self.A),
                mode=self.mode)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, tensor.dot)
        self.assertFunctionContains1(
            f,
            CGemv(inplace=True)
        )

        # Assert they produce the same output
        assert numpy.allclose(f(self.xval, self.Aval),
                numpy.dot(self.xval, self.Aval))

        # Test with negative strides on 2 dims
        assert numpy.allclose(f(self.xval, self.Aval[::-1, ::-1]),
                numpy.dot(self.xval, self.Aval[::-1, ::-1]))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_optimizations_mv(self):
        ''' Test matrix dot vector '''
        f = theano.function([self.A, self.y],
                theano.dot(self.A, self.y),
                mode=self.mode)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, tensor.dot)
        self.assertFunctionContains1(
            f,
            CGemv(inplace=True)
        )

        # Assert they produce the same output
        assert numpy.allclose(f(self.Aval, self.yval),
                numpy.dot(self.Aval, self.yval))
        # Test with negative strides on 2 dims
        assert numpy.allclose(f(self.Aval[::-1, ::-1], self.yval),
                numpy.dot(self.Aval[::-1, ::-1], self.yval))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_scan_extra_inputs_hessian(self):
        x = theano.tensor.vector('x')
        A = theano.tensor.matrix('A')
        fc1 = theano.shared(0.5, name='fc1')
        fc2 = theano.shared(0.9, name='fc2')
        y = fc1 * theano.dot(x * x, theano.dot(A, x))
        y.name = 'y'
        gy = theano.tensor.grad(y, x)
        gy.name = 'gy'
        hy, updates = theano.scan(
            lambda i, gy, x: theano.tensor.grad(gy[i] * fc2, x),
            sequences=theano.tensor.arange(gy.shape[0]),
            non_sequences=[gy, x])

        f = theano.function([x, A], hy, allow_input_downcast=True)
        vx = numpy.array([1., 1.], dtype=theano.config.floatX)
        vA = numpy.array([[1., 1.], [1., 0.]], dtype=theano.config.floatX)
        vR = numpy.array([[3.6, 1.8], [1.8, 0.9]], dtype=theano.config.floatX)
        out = f(vx, vA)

        utt.assert_allclose(out, vR)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_pushout(self):
        W1 = tensor.matrix('W1')
        W2 = tensor.matrix('W2')
        h0 = tensor.vector('h0')

        def lambda_fn(h, W1, W2):
            return tensor.dot(h, W1 + W2)

        o, _ = theano.scan(lambda_fn,
                           outputs_info=h0,
                           non_sequences=[W1, W2],
                           n_steps=5)

        f = theano.function([h0, W1, W2], o, mode=mode_with_opt)

        scan_node = [x for x in f.maker.fgraph.toposort()
                     if isinstance(x.op,
                                   theano.scan_module.scan_op.Scan)][0]
        assert len([x for x in scan_node.op.fn.maker.fgraph.toposort()
                    if isinstance(x.op, theano.tensor.Elemwise)]) == 0
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_alloc_inputs1(self):
        W1 = tensor.matrix('W1')
        W2 = tensor.matrix('W2')
        h0 = tensor.vector('h0')

        def lambda_fn(h, W1, W2):
            return tensor.dot(h, W1 * W2)
        o, _ = theano.scan(lambda_fn,
                           outputs_info=h0,
                           non_sequences=[W1, tensor.zeros_like(W2)],
                           n_steps=5)

        f = theano.function([h0, W1, W2], o, mode=mode_with_opt)
        scan_node = [x for x in f.maker.fgraph.toposort()
                     if isinstance(x.op,
                                   theano.scan_module.scan_op.Scan)][0]
        assert len([x for x in scan_node.op.fn.maker.fgraph.toposort()
                    if isinstance(x.op, theano.tensor.Elemwise)]) == 0
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_alloc_inputs2(self):
        raise SkipTest("This tests depends on an optimization for "
                       "scan that has not been implemented yet.")
        W1 = tensor.matrix()
        W2 = tensor.matrix()
        h0 = tensor.vector()

        def lambda_fn(W1, h, W2):
            return W1 * tensor.dot(h, W2)

        o, _ = theano.scan(lambda_fn,
                           sequences=tensor.zeros_like(W1),
                           outputs_info=h0,
                           non_sequences=[tensor.zeros_like(W2)],
                           n_steps=5)

        f = theano.function([h0, W1, W2], o, mode=mode_with_opt)
        scan_node = [x for x in f.maker.fgraph.toposort()
                     if isinstance(x.op,
                                   theano.scan_module.scan_op.Scan)][0]

        assert len([x for x in scan_node.op.fn.maker.fgraph.toposort()
                    if isinstance(x.op, theano.tensor.Elemwise)]) == 0
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_strict_mode_ex(self):
        n = 10

        w = numpy.array([[-1, 2], [3, -4]]).astype(theano.config.floatX)
        w_ = theano.shared(w)
        x0 = numpy.array([1, 2]).astype(theano.config.floatX)
        x0_ = tensor.vector(name='x0', dtype=theano.config.floatX)

        def _scan_loose(x):
            return tensor.dot(x, w_)

        ret_strict = theano.scan(_scan_loose,
                               sequences=[],
                               outputs_info=[x0_],
                               n_steps=n,
                               strict=True)

        f_strict = theano.function([x0_], ret_strict[0][-1])
        result_strict = f_strict(x0)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_compute_test_value_grad_cast():
    # Test for test values when variables have to be casted
    # Reported by Daniel Renshaw at
    # https://groups.google.com/d/topic/theano-users/o4jK9xDe5WI/discussion
    floatX = theano.config.floatX
    backup = theano.config.compute_test_value
    theano.config.compute_test_value = 'raise'
    try:
        h = tensor.matrix('h')
        h.tag.test_value = numpy.array([[1, 2, 3, 4], [5, 6, 7, 8]],
                                       dtype=floatX)

        w = theano.shared(numpy.random.randn(4, 3).astype(floatX), name='w')

        outputs, _ = theano.scan(lambda i, h, w: (theano.dot(h[i], w), i),
                                 outputs_info=[None, 0], non_sequences=[h, w],
                                 n_steps=3)

        theano.grad(outputs[0].sum(), w)
    finally:
        theano.config.compute_test_value = backup
项目:Relation-Network    Author: subercui    | Project source | File source
def __init__(self, input, n_in, n_out, prefix='Logist'):

        # initialize the weights W, a matrix of shape (n_in, n_out), with uniform values
        self.W = param_init().uniform((n_in, n_out), name=_p(prefix, 'W'))
        # initialize the biases b as a vector of n_out zeros
        self.b = param_init().constant((n_out,), name=_p(prefix, 'b'))

        # compute vector of class-membership probabilities in symbolic form
        energy = theano.dot(input, self.W) + self.b
        if energy.ndim == 3:
            energy_exp = T.exp(energy - T.max(energy, 2, keepdims=True))
            pmf = energy_exp / energy_exp.sum(2, keepdims=True)
        else:
            pmf = T.nnet.softmax(energy)

        self.p_y_given_x = pmf
        self.y_pred = T.argmax(self.p_y_given_x, axis=-1)

        # compute prediction as class whose probability is maximal in
        # symbolic form

        # parameters of the model
        self.params = [self.W, self.b]
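
Both branches of the ``if`` compute the same class-membership probabilities; in equation form (my restatement, not text from the project):

.. math:: p(y \mid x) = \mathrm{softmax}(x W + b),

with the softmax taken over the last axis. The manual exp-and-normalize branch handles 3-D ``energy``, apparently because ``T.nnet.softmax`` only accepts a 2-D input.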
项目:Relation-Network    Author: subercui    | Project source | File source
def _step_forward_with_context(self, x_t, x_m, h_tm1, c_z, c_r, c_h):
        """
        x_t: input at time t
        x_m: mask of x_t
        h_tm1: previous state
        c_x: contex of the rnn
        """
        z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) +
                             T.dot(h_tm1, self.W_hz) + c_z + self.b_z)

        r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) +
                             T.dot(h_tm1, self.W_hr) + c_r + self.b_r)

        can_h_t = T.tanh(T.dot(x_t, self.W_xh) +
                         r_t * T.dot(h_tm1, self.W_hh) + c_h +
                         self.b_h)
        h_t = (1 - z_t) * h_tm1 + z_t * can_h_t

        h_t = x_m[:, None] * h_t + (1. - x_m[:, None]) * h_tm1
        return h_t
项目:Relation-Network    Author: subercui    | Project source | File source
def _step_forward(self, x_t, x_m, h_tm1):
        """
        x_t: input at time t
        x_m: mask of x_t
        h_tm1: previous state
        c_x: contex of the rnn
        """
        z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) +
                             T.dot(h_tm1, self.W_hz) + self.b_z)

        r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) +
                             T.dot(h_tm1, self.W_hr) + self.b_r)

        can_h_t = T.tanh(T.dot(x_t, self.W_xh) +
                         r_t * T.dot(h_tm1, self.W_hh) +
                         self.b_h)
        h_t = (1 - z_t) * h_tm1 + z_t * can_h_t

        h_t = x_m[:, None] * h_t + (1. - x_m[:, None]) * h_tm1
        return h_t
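
Both step functions implement the standard GRU recurrence; restated as equations (my summary of the code above; the bracketed terms are the optional context inputs of the first variant):

.. math::

    z_t = \sigma(x_t W_{xz} + h_{t-1} W_{hz} [+ c_z] + b_z)

    r_t = \sigma(x_t W_{xr} + h_{t-1} W_{hr} [+ c_r] + b_r)

    \tilde{h}_t = \tanh(x_t W_{xh} + r_t \odot (h_{t-1} W_{hh}) [+ c_h] + b_h)

    h_t = (1 - z_t) \odot h_{t-1} + z_t \odot \tilde{h}_t

The final mask line keeps :math:`h_{t-1}` wherever ``x_m`` is 0, so padded time steps do not update the state.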
项目:nmt    Author: Playinf    | Project source | File source
def linear(inputs, size, bias, concat=False, dtype=None, scope=None):
    if not isinstance(size, (list, tuple)):
        raise ValueError("size argument must be (input_size, output_size)")

    input_size, output_size = size

    if not isinstance(input_size, (list, tuple)):
        input_size = [input_size]

    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]

    if len(inputs) != len(input_size):
        raise RuntimeError("unmatched elements found: inputs and input_size")

    results = []

    with variable_scope(scope):
        if concat:
            input_size = sum(input_size)
            inputs = theano.tensor.concatenate(inputs, -1)

            shape = [input_size, output_size]
            matrix = get_variable("matrix", shape, dtype=dtype)
            results.append(theano.dot(inputs, matrix))
        else:
            for i in range(len(input_size)):
                shape = [input_size[i], output_size]
                name = "matrix_%d" % i
                matrix = get_variable(name, shape, dtype=dtype)
                results.append(theano.dot(inputs[i], matrix))

        if bias:
            shape = [output_size]
            bias = get_variable("bias", shape, dtype=dtype)
            results.append(bias)

    if len(results) == 1:
        return results[0]

    return reduce(theano.tensor.add, results)
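
A hedged usage sketch for the helper above (shapes and scope names are illustrative; ``linear``, ``variable_scope`` and ``get_variable`` come from the surrounding nmt codebase):

import theano
import theano.tensor as T

x = T.matrix("x")                 # (batch, 256), shapes illustrative
h = T.matrix("h")                 # (batch, 128)

# one input: a single weight matrix of shape [256, 512], plus a bias
y = linear(x, [256, 512], bias=True, scope="proj")

# two inputs, concatenated along the last axis before one projection
z = linear([x, h], [[256, 128], 512], bias=True, concat=True, scope="cat")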
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_csr_correct_output_faster_than_scipy(self):

        # In contrast with test_grad, we put csr in float32, csc in float64

        sparse_dtype = 'float32'
        dense_dtype = 'float32'

        a = SparseType('csr', dtype=sparse_dtype)()
        b = tensor.matrix(dtype=dense_dtype)
        d = theano.dot(a, b)
        f = theano.function([a, b], d)

        for M, N, K, nnz in [(4, 3, 2, 3),
                             (40, 30, 20, 3),
                             (40, 30, 20, 30),
                             (400, 3000, 200, 6000),
                             ]:
            spmat = sp.csr_matrix(random_lil((M, N), sparse_dtype, nnz))
            mat = numpy.asarray(numpy.random.randn(N, K), dense_dtype)
            t0 = time.time()
            theano_result = f(spmat, mat)
            t1 = time.time()
            scipy_result = spmat * mat
            t2 = time.time()

            theano_time = t1 - t0
            scipy_time = t2 - t1
            # print 'theano took', theano_time,
            # print 'scipy took', scipy_time
            overhead_tol = 0.002  # seconds
            overhead_rtol = 1.1  # times as long
            utt.assert_allclose(scipy_result, theano_result)
            if (theano.config.mode not in ["DebugMode", "DEBUG_MODE"] and
                    theano.config.cxx):
                self.assertFalse(
                    theano_time > overhead_rtol * scipy_time + overhead_tol,
                    (theano_time,
                     overhead_rtol * scipy_time + overhead_tol,
                     scipy_time, overhead_rtol, overhead_tol))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_cuda(self):
        import theano.sandbox.cuda as cuda
        if not cuda.cuda_available:
            raise SkipTest("Optional package cuda not available")

        a = sparse.csr_matrix('a', dtype='float32')
        b = cuda.float32_shared_constructor(
            numpy.random.rand(3, 4).astype('float32'))
        d = sparse.dot(a, b)
        f = theano.function([a], d)

        a_val = scipy.sparse.csr_matrix(random_lil((5, 3), 'float32', 5))
        d_theano = f(a_val)
        d_numpy = a_val * b.get_value()
        utt.assert_allclose(d_numpy, d_theano)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def matrix_dot(*args):
    """ Shorthand for product between several dots.

    Given :math:`N` matrices :math:`A_0, A_1, .., A_N`, ``matrix_dot`` will
    generate the matrix product between all in the given order, namely
    :math:`A_0 \cdot A_1 \cdot A_2 \cdot .. \cdot A_N`.

    """
    rval = args[0]
    for a in args[1:]:
        rval = theano.tensor.dot(rval, a)
    return rval
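
A minimal usage sketch (my illustration; the import path ``theano.tensor.nlinalg`` is assumed):

import theano
import theano.tensor as T
from theano.tensor.nlinalg import matrix_dot  # assumed import path

A, B, C = T.matrices('A', 'B', 'C')
out = matrix_dot(A, B, C)   # same graph as T.dot(T.dot(A, B), C)
f = theano.function([A, B, C], out)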
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def perform(self, node, inputs, outputs):
        """
        Implements the "reverse-mode" gradient for the eigensystem of
        a square matrix.

        """
        x, w, v, W, V = inputs
        N = x.shape[0]
        outer = numpy.outer

        def G(n):
            return sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])
                       for m in xrange(N) if m != n)

        g = sum(outer(v[:, n], v[:, n] * W[n] + G(n))
                for n in xrange(N))

        # Numpy's eigh(a, 'L') (eigh(a, 'U')) is a function of tril(a)
        # (triu(a)) only.  This means that partial derivative of
        # eigh(a, 'L') (eigh(a, 'U')) with respect to a[i,j] is zero
        # for i < j (i > j).  At the same time, non-zero components of
        # the gradient must account for the fact that variation of the
        # opposite triangle contributes to variation of two elements
        # of Hermitian (symmetric) matrix. The following line
        # implements the necessary logic.
        out = self.tri0(g) + self.tri1(g).T

        # The call to self.tri0 in perform upcast from float32 to
        # float64 or from int* to int64 in numpy 1.6.1 but not in
        # 1.6.2. We do not want version dependent dtype in Theano.
        # We think it should be the same as the output.
        outputs[0][0] = numpy.asarray(out, dtype=node.outputs[0].dtype)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power n.

    Parameters
    ----------
    M : Tensor variable
    n : Python int
    """
    result = 1
    for i in xrange(n):
        result = theano.dot(result, M)
    return result
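
Again a minimal sketch (assuming the same ``theano.tensor.nlinalg`` home as ``matrix_dot`` above):

import numpy
import theano
import theano.tensor as T
from theano.tensor.nlinalg import matrix_power  # assumed import path

M = T.dmatrix('M')
f = theano.function([M], matrix_power(M, 3))

m = numpy.array([[1., 1.], [0., 1.]])
assert numpy.allclose(f(m), numpy.dot(numpy.dot(m, m), m))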
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_const_type_in_mul_canonizer():
    input = dmatrix()
    w = dmatrix()
    visb = dvector()
    hidb = dvector()
    betas = dvector()
    a = dvector()

    def sigm(x):
        return 1. / (1 + tensor.exp(-x))

    hid = sigm((tensor.dot(w, input) + hidb) * betas)

    vis_gauss1 = (tensor.dot(w.T, hid) + visb) * betas / (2 * a * a)
    vis_gauss2 = (tensor.dot(w.T, hid) + visb) * betas / (2. * a * a)

    f1 = function([input, w, visb, hidb, betas, a], vis_gauss1)
    f2 = function([input, w, visb, hidb, betas, a], vis_gauss2)

    ival = numpy.random.rand(5, 5)
    wval = numpy.random.rand(5, 5)
    visbval = numpy.random.rand(5)
    hidbval = numpy.random.rand(5)
    betaval = numpy.random.rand(5)
    aval = numpy.random.rand(5)

    utt.assert_allclose(
        f2(ival, wval, visbval, hidbval, betaval, aval),
        f1(ival, wval, visbval, hidbval, betaval, aval))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot_allocs_0(self):
        v1 = tensor.vector('v1')
        v2 = tensor.vector('v2')
        m1 = tensor.matrix('m1')
        m2 = tensor.matrix('m2')
        vv2 = numpy.asarray([0, 1], dtype=theano.config.floatX)
        vm2 = numpy.asarray([[1, 2], [4, 5]],
                            dtype=theano.config.floatX)
        vv3 = numpy.asarray([0, 1, 2], dtype=theano.config.floatX)
        vm3 = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                            dtype=theano.config.floatX)
        for _e1 in [(v1, vv2, vv3), (m1, vm2, vm3)]:
            for _e2 in [(v2, vv2, vv3), (m2, vm2, vm3)]:
                for p in [0, 1]:
                    if p == 0:
                        e1 = tensor.zeros_like(_e1[0])
                        e2 = _e2[0]
                    else:
                        e1 = _e1[0]
                        e2 = tensor.zeros_like(_e2[0])
                    o = tensor.dot(e1, e2)
                    f = theano.function([_e1[0], _e2[0]], o, mode=self.mode)
                    f(_e1[1], _e2[1])
                    f(_e1[2], _e2[2])
                    assert numpy.all([not isinstance(n.op, tensor.Dot) for n in
                                      f.maker.fgraph.toposort()])

                    # test that we don't remove shape errors
                    self.assertRaises((ValueError, AssertionError), f,
                                      _e1[1], _e2[2])
                    self.assertRaises((ValueError, AssertionError), f,
                                      _e1[2], _e2[1])
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_subtensor_of_dot():
    m1 = theano.tensor.matrix()
    m2 = theano.tensor.matrix()
    d1 = numpy.arange(6).reshape((3, 2)).astype(config.floatX)
    d2 = numpy.arange(8).reshape((2, 4)).astype(config.floatX) + 10
    mode = compile.get_default_mode().including("local_subtensor_of_dot")

    def test_equality(a, b):
        return a.shape == b.shape and numpy.allclose(a, b)

    # [cst]
    f = theano.function([m1, m2], theano.dot(m1, m2)[1], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1])
    # DimShuffle happens in FAST_COMPILE
    assert isinstance(topo[-1].op, (T.blas_c.CGemv, T.blas.Gemv, T.DimShuffle))

    # slice
    f = theano.function([m1, m2], theano.dot(m1, m2)[1:2], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1:2])
    assert isinstance(topo[-1].op, T.blas.Dot22)

    m1 = theano.tensor.tensor3()
    m2 = theano.tensor.tensor3()
    idx = theano.tensor.iscalar()
    d1 = numpy.arange(30).reshape(2, 5, 3).astype(config.floatX)
    d2 = numpy.arange(72).reshape(4, 3, 6).astype(config.floatX) + 100

    f = theano.function([m1, m2, idx], theano.dot(m1, m2)[idx, 1:4, :, idx:], mode=mode)
    assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1, 1:4, :, 1:])
    # Now test that the stack trace is copied over properly,
    # if we return the gradients. We need to use same mode as before.
    assert check_stack_trace(f, ops_to_check='last')

    f = theano.function([m1, m2, idx], theano.dot(m1, m2)[1:4, :, idx:, idx], mode=mode)
    assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1:4, :, 1:, 1])

    # Now test that the stack trace is copied over properly,
    # if we return the gradients. We need to use same mode as before.
    assert check_stack_trace(f, ops_to_check='last')
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_matrix_matrix(self):
        a, b = matrices('ab')
        g = self.simple_optimize(FunctionGraph([a, b], [tensor.dot(a, b).T]))
        sg = '[dot(InplaceDimShuffle{1,0}(b), InplaceDimShuffle{1,0}(a))]'
        assert str(g) == sg, (str(g), sg)
        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(g, ops_to_check='all'))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_row_matrix(self):
        a = vector('a')
        b = matrix('b')
        g = optimize(FunctionGraph(
            [a, b],
            [tensor.dot(a.dimshuffle('x', 0), b).T]),
            level='stabilize')
        sg = '[dot(InplaceDimShuffle{1,0}(b), InplaceDimShuffle{0,x}(a))]'
        assert str(g) == sg, (str(g), sg)
        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(g, ops_to_check='all'))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_matrix_col(self):
        a = vector('a')
        b = matrix('b')
        g = optimize(FunctionGraph(
            [a, b],
            [tensor.dot(b, a.dimshuffle(0, 'x')).T]),
            level='stabilize')
        sg = '[dot(InplaceDimShuffle{x,0}(a), InplaceDimShuffle{1,0}(b))]'
        assert str(g) == sg, (str(g), sg)
        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(g, ops_to_check='all'))
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def _gemm(z, a, x, y, b):
        assert a.shape == ()
        assert b.shape == ()
        return b * z + a * numpy.dot(x, y)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_factorised_scalar(self):
        a = T.matrix()
        b = T.matrix()
        c = T.matrix()
        s = theano.shared(numpy.zeros((5, 5)).astype(config.floatX))

        lr1 = T.constant(0.01).astype(config.floatX)
        lr2 = T.constant(2).astype(config.floatX)
        l2_reg = T.constant(0.0001).astype(config.floatX)

        # test constant merge with gemm
        f = theano.function([a, b], updates=[(s, lr1 * T.dot(a, b) +
                                                l2_reg * lr2 * s)],
                            mode=mode_not_fast_compile).maker.fgraph.toposort()
        #[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
        # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
        # 2e-06)]
        assert len(f) == 1
        assert f[0].op == gemm_inplace

        # test factored scalar with merge
        f = theano.function([a, b], updates=[(s, lr1 * (T.dot(a, b) -
                                                        l2_reg * s))],
                            mode=mode_not_fast_compile).maker.fgraph.toposort()
        #[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
        # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
        # -2e-06)]
        assert len(f) == 1
        assert f[0].op == gemm_inplace

        # test factored scalar with merge and neg
        f = theano.function([a, b],
                            updates=[(s, s - lr1 * (s * .0002 + T.dot(a, b)))],
                            mode=mode_not_fast_compile).maker.fgraph.toposort()
        #[Gemm{inplace}(<TensorType(float64, matrix)>, -0.01,
        # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
        # 0.999998)]
        assert len(f) == 1
        assert f[0].op == gemm_inplace
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_destroy_map4(self):
        """test that dot args can be aliased"""
        Z = shared(self.rand(2, 2), name='Z')
        A = shared(self.rand(2, 2), name='A')
        one = T.constant(1.0).astype(Z.dtype)
        f = inplace_func([], gemm_inplace(Z, one, A, A, one))
        f()
        f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))
        f()
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_gemm_opt0():
    """Many subgraphs whose dots can be eliminated"""
    X, Y, Z, a, b = XYZab()

    just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a + Z * b])
    just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) + b * Z])
    just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a - Z * b])
    just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) - b * Z])
    just_gemm([X, Y, Z, a, b], [b * Z - a * T.dot(X, Y)])

    # with transposes (transposes should be pushed through dot in canonicalize)
    just_gemm([X, Y, Z, a, b], [b * Z.T - a * T.dot(Y.T, X.T)])
    just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * T.dot(X, Y).T])
    just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y).T],
            ishapes=[(5, 3), (3, 4), (4, 5), (), ()])

    # with N multiplications instead of just one
    just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * T.dot(X, Y) * b])
    just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z * b + T.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z + a * b * a * T.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * T.dot(X, Y) * b])
    just_gemm([X, Y, Z, a, b], [Z - T.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z * b - T.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z - a * b * a * T.dot(X, Y)])
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_upcasting_scalar_nogemm():
    # Test that the optimization does not crash when the scale has an incorrect
    # dtype, and forces upcasting of the result
    v = T.fmatrix('v')
    w = T.fmatrix('w')
    t = T.fmatrix('t')
    alpha = T.dscalar('a')

    rval = T.dot(w, v) * alpha + t

    f = theano.function([w, v, t, alpha], rval)
    t = f.maker.fgraph.toposort()
    assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
    #theano.printing.debugprint(f, print_type=True)

    v = T.fmatrix('v')
    w = T.fmatrix('w')
    t = T.fmatrix('t')
    alpha = T.cscalar('a')

    on_opt_error = config.on_opt_error
    try:
        config.on_opt_error = 'raise'
        rval = T.dot(w, v) * alpha + t
        f = theano.function([w, v, t, alpha], rval)
    finally:
        config.on_opt_error = on_opt_error

    t = f.maker.fgraph.toposort()
    assert numpy.sum([isinstance(n.op, Gemm) for n in t]) == 0
    #theano.printing.debugprint(f, print_type=True)
项目:Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_gemm_opt_wishlist():
    X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()

    # with >2 additions of the same T.dot(X, Y) term
    just_gemm([X, Y, Z, a, b],
              [(b * b) * Z * a + (a * a) * T.dot(X, Y) + b * T.dot(X, Y)])

    just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y) + T.dot(X, Y)])