Python theano module: sparse() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.sparse.
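Before the per-project snippets, here is a minimal self-contained sketch (assuming an older Theano release with SciPy installed, since theano.sparse depends on SciPy) of what the module offers: symbolic sparse matrix variables plus ops such as sparse.dot:

import numpy as np
import scipy.sparse as sp
import theano
import theano.sparse as sparse
import theano.tensor as T

# A symbolic CSR matrix and a symbolic dense matrix; sparse.dot with one
# sparse and one dense operand returns a dense tensor.
x = sparse.csr_matrix('x', dtype='float64')
y = T.matrix('y', dtype='float64')
z = sparse.dot(x, y)

f = theano.function([x, y], z)
x_val = sp.csr_matrix(np.eye(3))             # sparse identity matrix
y_val = np.arange(9, dtype='float64').reshape(3, 3)
print(f(x_val, y_val))                       # identity times y, so this prints y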

Project: keras | Author: GeekLiB
def placeholder(shape=None, ndim=None, dtype=_FLOATX, sparse=False, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if shape is None and ndim is None:
        raise Exception('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])

    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
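For context, a hedged usage sketch of the backend helper above (assuming the Keras version it comes from exposes it as keras.backend.placeholder, with is_sparse from the same backend): with sparse=True the result is a theano.sparse CSR variable, otherwise a plain TensorType variable with the requested number of dimensions.

from keras import backend as K

x_dense = K.placeholder(shape=(None, 100))                 # dense 2-D input
x_sparse = K.placeholder(shape=(None, 100), sparse=True)   # theano.sparse CSR input
print(K.is_sparse(x_dense), K.is_sparse(x_sparse))         # False True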
Project: deep-learning-keras-projects | Author: jasmeetsb
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])

    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
Project: keras-customized | Author: ambrite
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])

    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
Project: keras | Author: NVIDIA
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])

    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
Project: seq2graph | Author: masterkeywikz
def _setup_vars(self, sparse_input):
        '''Setup Theano variables for our network.

        Parameters
        ----------
        sparse_input : bool
            Unused -- theanets does not support autoencoders with sparse input.

        Returns
        -------
        vars : list of theano variables
            A list of the variables that this network requires as inputs.
        '''
        assert not sparse_input, 'Theanets does not support sparse autoencoders!'

        # x represents our network's input (and target outputs).
        self.x = TT.matrix('x')

        # the weight array is provided to ensure that different target values
        # are taken into account with different weights during optimization.
        self.weights = TT.matrix('weights')

        if self.weighted:
            return [self.x, self.weights]
        return [self.x]
Project: keras_superpixel_pooling | Author: parag2489
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])

    name = _prepare_name(name, 'placeholder')
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparse_tensor_error(self):
        import theano.sparse
        if not theano.sparse.enable_sparse:
            raise SkipTest("Optimization temporarily disabled")
        rng = numpy.random.RandomState(utt.fetch_seed())
        data = rng.rand(2, 3).astype(self.dtype)
        x = self.shared(data)
        y = theano.sparse.matrix('csc', dtype=self.dtype, name='y')
        z = theano.sparse.matrix('csr', dtype=self.dtype, name='z')
        cond = theano.tensor.iscalar('cond')

        self.assertRaises(TypeError, ifelse, cond, x, y)
        self.assertRaises(TypeError, ifelse, cond, y, x)
        self.assertRaises(TypeError, ifelse, cond, x, z)
        self.assertRaises(TypeError, ifelse, cond, z, x)
        self.assertRaises(TypeError, ifelse, cond, y, z)
        self.assertRaises(TypeError, ifelse, cond, z, y)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_csm_properties_csm():
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_csm_properties_csm")
    for CS, cast in [(sparse.CSC, sp.csc_matrix),
                     (sparse.CSR, sp.csr_matrix)]:
        f = theano.function([data, indices, indptr, shape],
                            sparse.csm_properties(
                                CS(data, indices, indptr, shape)),
                            mode=mode)
        assert not any(
            isinstance(node.op, (sparse.CSM, sparse.CSMProperties))
            for node in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40),
                            config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_csm_grad_c():
    raise SkipTest("Opt disabled as it don't support unsorted indices")
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()

    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.Mode(linker='c|py', optimizer='fast_compile')

    mode = mode.including("specialize", "local_csm_grad_c")
    for CS, cast in [(sparse.CSC, sp.csc_matrix), (sparse.CSR, sp.csr_matrix)]:
        cost = tensor.sum(sparse.DenseFromSparse()(CS(data, indices, indptr, shape)))
        f = theano.function(
            [data, indices, indptr, shape],
            tensor.grad(cost, data),
            mode=mode)
        assert not any(isinstance(node.op, sparse.CSMGrad) for node
                       in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40),
                            config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_mul_s_d():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_mul_s_d")

    for sp_format in sparse.sparse_formats:
        inputs = [getattr(theano.sparse, sp_format + '_matrix')(),
                  tensor.matrix()]

        f = theano.function(inputs,
                            sparse.mul_s_d(*inputs),
                            mode=mode)

        assert not any(isinstance(node.op, sparse.MulSD) for node
                       in f.maker.fgraph.toposort())
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_mul_s_v():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_mul_s_v")

    for sp_format in ['csr']:  # Not implemented for other formats
        inputs = [getattr(theano.sparse, sp_format + '_matrix')(),
                  tensor.vector()]

        f = theano.function(inputs,
                            sparse.mul_s_v(*inputs),
                            mode=mode)

        assert not any(isinstance(node.op, sparse.MulSV) for node
                       in f.maker.fgraph.toposort())
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_sampling_dot_csr():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_sampling_dot_csr")

    for sp_format in ['csr']:  # Not implemented for other formats
        inputs = [tensor.matrix(),
                  tensor.matrix(),
                  getattr(theano.sparse, sp_format + '_matrix')()]

        f = theano.function(inputs,
                            sparse.sampling_dot(*inputs),
                            mode=mode)

        if theano.config.blas.ldflags:
            assert not any(isinstance(node.op, sparse.SamplingDot) for node
                           in f.maker.fgraph.toposort())
        else:
            # SamplingDotCSR's C implementation needs BLAS, so it should not
            # be inserted.
            assert not any(isinstance(node.op, sparse.opt.SamplingDotCSR) for node
                           in f.maker.fgraph.toposort())
Project: Theano-Deep-learning | Author: GeekLiB
def test_equality_case(self):
        """
        Test assuring normal behaviour when values
        in the matrices are equal
        """

        scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]

        if scipy_ver < [0, 13]:
            raise SkipTest("comparison operators need newer release of scipy")

        x = sparse.csc_matrix()
        y = theano.tensor.matrix()

        m1 = sp.csc_matrix((2, 2), dtype=theano.config.floatX)
        m2 = numpy.asarray([[0, 0], [0, 0]], dtype=theano.config.floatX)

        for func in self.testsDic:

            op = func(y, x)
            f = theano.function([y, x], op)

            self.assertTrue(numpy.array_equal(f(m2, m1),
                                              self.testsDic[func](m2, m1)))
Project: Theano-Deep-learning | Author: GeekLiB
def test_csm_unsorted(self):
        """
        Test support for gradients of unsorted inputs.
        """
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc', ]:
            for dtype in ['float32', 'float64']:
                x = tensor.tensor(dtype=dtype, broadcastable=(False,))
                y = tensor.ivector()
                z = tensor.ivector()
                s = tensor.ivector()
                # Sparse advanced indexing produces unsorted sparse matrices
                a = sparse_random_inputs(format, (4, 3), out_dtype=dtype,
                                         unsorted_indices=True)[1][0]
                # Make sure it's unsorted
                assert not a.has_sorted_indices
                def my_op(x):
                    y = tensor.constant(a.indices)
                    z = tensor.constant(a.indptr)
                    s = tensor.constant(a.shape)
                    return tensor.sum(
                        dense_from_sparse(CSM(format)(x, y, z, s) * a))
                verify_grad_sparse(my_op, [a.data])
Project: Theano-Deep-learning | Author: GeekLiB
def test_dot_sparse_sparse(self):
        # test dot for 2 input sparse matrix
        sparse_dtype = 'float64'
        sp_mat = {'csc': sp.csc_matrix,
                  'csr': sp.csr_matrix,
                  'bsr': sp.csr_matrix}

        for sparse_format_a in ['csc', 'csr', 'bsr']:
            for sparse_format_b in ['csc', 'csr', 'bsr']:
                a = SparseType(sparse_format_a, dtype=sparse_dtype)()
                b = SparseType(sparse_format_b, dtype=sparse_dtype)()
                d = theano.dot(a, b)
                f = theano.function([a, b], theano.Out(d, borrow=True))
                topo = f.maker.fgraph.toposort()
                for M, N, K, nnz in [(4, 3, 2, 3),
                                     (40, 30, 20, 3),
                                     (40, 30, 20, 30),
                                     (400, 3000, 200, 6000),
                                 ]:
                    a_val = sp_mat[sparse_format_a](
                        random_lil((M, N), sparse_dtype, nnz))
                    b_val = sp_mat[sparse_format_b](
                        random_lil((N, K), sparse_dtype, nnz))
                    f(a_val, b_val)
Project: Theano-Deep-learning | Author: GeekLiB
def setUp(self):
        super(DotTests, self).setUp()
        x_size = (10, 100)
        y_size = (100, 1000)
        utt.seed_rng()

        self.x_csr = scipy.sparse.csr_matrix(
            numpy.random.binomial(1, 0.5, x_size), dtype=theano.config.floatX)
        self.x_csc = scipy.sparse.csc_matrix(
            numpy.random.binomial(1, 0.5, x_size), dtype=theano.config.floatX)
        self.y = numpy.asarray(numpy.random.uniform(-1, 1, y_size),
                               dtype=theano.config.floatX)
        self.y_csr = scipy.sparse.csr_matrix(
            numpy.random.binomial(1, 0.5, y_size), dtype=theano.config.floatX)
        self.y_csc = scipy.sparse.csc_matrix(
            numpy.random.binomial(1, 0.5, y_size), dtype=theano.config.floatX)
        self.v_10 = numpy.asarray(numpy.random.uniform(-1, 1, 10),
                                  dtype=theano.config.floatX)
        self.v_100 = numpy.asarray(numpy.random.uniform(-1, 1, 100),
                                   dtype=theano.config.floatX)
Project: Theano-Deep-learning | Author: GeekLiB
def test_csc_dense(self):
        x = theano.sparse.csc_matrix('x')
        y = theano.tensor.matrix('y')
        v = theano.tensor.vector('v')

        for (x, y, x_v, y_v) in [(x, y, self.x_csc, self.y),
                                 (x, v, self.x_csc, self.v_100),
                                 (v, x, self.v_10, self.x_csc)]:

            f_a = theano.function([x, y], theano.sparse.dot(x, y))
            f_b = lambda x, y: x * y

            utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

            # Test infer_shape
            self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                    [x_v, y_v],
                                    (Dot, Usmm, UsmmCscDense))
Project: Theano-Deep-learning | Author: GeekLiB
def test_int32_dtype(self):
        # Reported on the theano-user mailing-list:
        # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
        size = 9
        intX = 'int32'

        C = tensor.matrix('C', dtype=intX)
        I = tensor.matrix('I', dtype=intX)

        fI = I.flatten()
        data = tensor.ones_like(fI)
        indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

        m1 = sparse.CSR(data, fI, indptr, (8, size))
        m2 = sparse.dot(m1, C)
        y = m2.reshape(shape=(2, 4, 9), ndim=3)

        f = theano.function(inputs=[I, C], outputs=y)
        i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
        a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                          dtype=intX)
        f(i, a)
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparse_shared_memory():
    # Note: there are no inplace ops on sparse matrices yet. If one is
    # someday implemented, we could test it here.
    a = random_lil((3, 4), 'float32', 3).tocsr()
    m1 = random_lil((4, 4), 'float32', 3).tocsr()
    m2 = random_lil((4, 4), 'float32', 3).tocsr()
    x = SparseType('csr', dtype='float32')()
    y = SparseType('csr', dtype='float32')()

    sdot = theano.sparse.structured_dot
    z = sdot(x * 3, m1) + sdot(y * 2, m2)

    f = theano.function([theano.In(x, mutable=True),
                         theano.In(y, mutable=True)], z, mode='FAST_RUN')

    def f_(x, y, m1=m1, m2=m2):
        return ((x * 3) * m1) + ((y * 2) * m2)

    assert SparseType.may_share_memory(a, a)  # This is trivial
    result = f(a, a)
    result_ = f_(a, a)
    assert (result_.todense() == result.todense()).all()
Project: Theano-Deep-learning | Author: GeekLiB
def test_size():
    """
    Ensure the `size` attribute of sparse matrices behaves as in numpy.
    """
    for sparse_type in ('csc_matrix', 'csr_matrix'):
        x = getattr(theano.sparse, sparse_type)()
        y = getattr(scipy.sparse, sparse_type)((5, 7)).astype(config.floatX)
        get_size = theano.function([x], x.size)

        def check():
            assert y.size == get_size(y)
        # We verify that the size is correctly updated as we store more data
        # into the sparse matrix (including zeros).
        check()
        y[0, 0] = 1
        check()
        y[0, 1] = 0
        check()
Project: Theano-Deep-learning | Author: GeekLiB
def test_op(self):
        for format in sparse.sparse_formats:
            for axis in self.possible_axis:
                variable, data = sparse_random_inputs(format,
                                                      shape=(10, 10))

                z = theano.sparse.sp_sum(variable[0], axis=axis)
                if axis is None:
                    assert z.type.broadcastable == ()
                else:
                    assert z.type.broadcastable == (False, )

                f = theano.function(variable, self.op(variable[0], axis=axis))
                tested = f(*data)
                expected = data[0].todense().sum(axis).ravel()
                utt.assert_allclose(expected, tested)
Project: Theano-Deep-learning | Author: GeekLiB
def test_op(self):
        for format in sparse.sparse_formats:
            for shape in zip(range(5, 9), range(3, 7)[::-1]):
                variable, data = sparse_random_inputs(format, shape=shape)

                data[0][0, 0] = data[0][1, 1] = 0

                f = theano.function(variable, self.op(*variable))
                tested = f(*data)
                expected = data[0]
                expected.eliminate_zeros()

                assert all(tested.data == expected.data)
                assert not all(tested.data == 0)

                tested = tested.toarray()
                expected = expected.toarray()
                utt.assert_allclose(expected, tested)
Project: Theano-Deep-learning | Author: GeekLiB
def test_GetItemList(self):

        a, A = sparse_random_inputs('csr', (4, 5))
        b, B = sparse_random_inputs('csc', (4, 5))
        y = a[0][[0, 1, 2, 3, 1]]
        z = b[0][[0, 1, 2, 3, 1]]

        fa = theano.function([a[0]], y)
        fb = theano.function([b[0]], z)

        t_geta = fa(A[0]).todense()
        t_getb = fb(B[0]).todense()

        s_geta = scipy.sparse.csr_matrix(A[0])[[0, 1, 2, 3, 1]].todense()
        s_getb = scipy.sparse.csc_matrix(B[0])[[0, 1, 2, 3, 1]].todense()

        utt.assert_allclose(t_geta, s_geta)
        utt.assert_allclose(t_getb, s_getb)
Project: Theano-Deep-learning | Author: GeekLiB
def test_GetItem2Lists(self):

        a, A = sparse_random_inputs('csr', (4, 5))
        b, B = sparse_random_inputs('csc', (4, 5))
        y = a[0][[0, 0, 1, 3], [0, 1, 2, 4]]
        z = b[0][[0, 0, 1, 3], [0, 1, 2, 4]]

        fa = theano.function([a[0]], y)
        fb = theano.function([b[0]], z)

        t_geta = fa(A[0])
        t_getb = fb(B[0])

        s_geta = numpy.asarray(scipy.sparse.csr_matrix(A[0])[[0, 0, 1, 3], [0, 1, 2, 4]])
        s_getb = numpy.asarray(scipy.sparse.csc_matrix(B[0])[[0, 0, 1, 3], [0, 1, 2, 4]])

        utt.assert_allclose(t_geta, s_geta)
        utt.assert_allclose(t_getb, s_getb)
Project: Theano-Deep-learning | Author: GeekLiB
def test_grad(self):
        for format in sparse.sparse_formats:
            for i_dtype in sparse.float_dtypes:
                for o_dtype in tensor.float_dtypes:
                    if o_dtype == 'float16':
                        # Don't test float16 output.
                        continue
                    _, data = sparse_random_inputs(
                        format,
                        shape=(4, 7),
                        out_dtype=i_dtype)

                    eps = None
                    if o_dtype == 'float32':
                        eps = 1e-2

                    verify_grad_sparse(Cast(o_dtype), data, eps=eps)
Project: Theano-Deep-learning | Author: GeekLiB
def test_op(self):
        for format in sparse.sparse_formats:
            for out_f in sparse.sparse_formats:
                for dtype in sparse.all_dtypes:
                    blocks = self.mat[format]

                    f = theano.function(
                        self.x[format],
                        self.op_class(
                            format=out_f, dtype=dtype)(*self.x[format]),
                        allow_input_downcast=True)

                    tested = f(*blocks)
                    expected = self.expected_f(blocks,
                                               format=out_f,
                                               dtype=dtype)

                    utt.assert_allclose(expected.toarray(), tested.toarray())
                    assert tested.format == expected.format
                    assert tested.dtype == expected.dtype
Project: Theano-Deep-learning | Author: GeekLiB
def test_mul_s_v(self):
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc']:
            for dtype in ['float32', 'float64']:
                x = theano.sparse.SparseType(format, dtype=dtype)()
                y = tensor.vector(dtype=dtype)
                f = theano.function([x, y], mul_s_v(x, y))

                spmat = sp_types[format](random_lil((4, 3), dtype, 3))
                mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

                out = f(spmat, mat)

                utt.assert_allclose(spmat.toarray() * mat, out.toarray())
Project: Theano-Deep-learning | Author: GeekLiB
def test_structured_add_s_v(self):
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc']:
            for dtype in ['float32', 'float64']:
                x = theano.sparse.SparseType(format, dtype=dtype)()
                y = tensor.vector(dtype=dtype)
                f = theano.function([x, y], structured_add_s_v(x, y))

                spmat = sp_types[format](random_lil((4, 3), dtype, 3))
                spones = spmat.copy()
                spones.data = numpy.ones_like(spones.data)
                mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

                out = f(spmat, mat)

                utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)),
                                    out.toarray())
Project: Theano-Deep-learning | Author: GeekLiB
def test_op_ss(self):
        for format in sparse.sparse_formats:
            for dtype in sparse.all_dtypes:
                variable, data = sparse_random_inputs(format,
                                                      shape=(10, 10),
                                                      out_dtype=dtype,
                                                      n=2,
                                                      p=0.1)

                f = theano.function(variable, self.op(*variable))

                tested = f(*data)

                x, y = [m.toarray() for m in data]
                expected = numpy.dot(x, y)

                assert tested.format == format
                assert tested.dtype == expected.dtype
                tested = tested.toarray()
                utt.assert_allclose(tested, expected)
Project: Theano-Deep-learning | Author: GeekLiB
def test_op_sd(self):
        for format in sparse.sparse_formats:
            for dtype in sparse.all_dtypes:
                variable, data = sparse_random_inputs(format,
                                                      shape=(10, 10),
                                                      out_dtype=dtype,
                                                      n=2,
                                                      p=0.1)
                variable[1] = tensor.TensorType(dtype=dtype,
                                                broadcastable=(False, False))()
                data[1] = data[1].toarray()

                f = theano.function(variable, self.op(*variable))

                tested = f(*data)
                expected = numpy.dot(data[0].toarray(), data[1])

                assert tested.format == format
                assert tested.dtype == expected.dtype
                tested = tested.toarray()
                utt.assert_allclose(tested, expected)
Project: Theano-Deep-learning | Author: GeekLiB
def test_infer_shape(self):
        for format in sparse.sparse_formats:
            for dtype in sparse.all_dtypes:
                (x, ), (x_value, ) = sparse_random_inputs(format,
                                                          shape=(9, 10),
                                                          out_dtype=dtype,
                                                          p=0.1)
                (y, ), (y_value, ) = sparse_random_inputs(format,
                                                          shape=(10, 24),
                                                          out_dtype=dtype,
                                                          p=0.1)
                variable = [x, y]
                data = [x_value, y_value]
                self._compile_and_check(variable,
                                        [self.op(*variable)],
                                        data,
                                        self.op_class)
Project: Theano-Deep-learning | Author: GeekLiB
def make_node(self, x, y):
        x, y = sparse.as_sparse_variable(x), tensor.as_tensor_variable(y)
        out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if self.inplace:
            assert out_dtype == y.dtype

        indices, indptr, data = csm_indices(x), csm_indptr(x), csm_data(x)
        # We either use CSC or CSR depending on the format of input
        assert self.format == x.type.format
        # The magic number two here arises because L{scipy.sparse}
        # objects must be matrices (have dimension 2)
        assert y.type.ndim == 2
        out = tensor.TensorType(dtype=out_dtype,
                                broadcastable=y.type.broadcastable)()
        return gof.Apply(self,
                         [data, indices, indptr, y],
                         [out])
Project: Theano-Deep-learning | Author: GeekLiB
def local_structured_dot(node):
    if node.op == sparse._structured_dot:
        a, b = node.inputs
        if a.type.format == 'csc':
            a_val, a_ind, a_ptr, a_shape = csm_properties(a)
            a_nsparse = a_shape[0]
            return [sd_csc(a_val, a_ind, a_ptr, a_nsparse, b)]
        if a.type.format == 'csr':
            a_val, a_ind, a_ptr, a_shape = csm_properties(a)
            return [sd_csr(a_val, a_ind, a_ptr, b)]
    return False


# Commented out because
# a) it is only slightly faster than scipy these days, and sometimes a little
# slower, and
# b) the resulting graphs make it very difficult for an op to do size checking
# on the matrices involved.  dimension mismatches are hard to detect sensibly.
# register_specialize(local_structured_dot)
Project: Theano-Deep-learning | Author: GeekLiB
def grad(self, inputs, grads):
        global sparse_module_ref
        x, ilist = inputs
        gz, = grads
        assert len(inputs) == 2
        if self.sparse_grad:
            if x.type.ndim != 2:
                raise TypeError(
                    "AdvancedSubtensor1: you can't take the sparse grad"
                    " from a tensor with ndim != 2. ndim is " +
                    str(x.type.ndim))
            if sparse_module_ref is None:
                import theano.sparse as sparse_module_ref

            rval1 = [sparse_module_ref.construct_sparse_from_list(x, gz,
                                                                  ilist)]
        else:
            rval1 = [advanced_inc_subtensor1(x.zeros_like(), gz, ilist)]
        return rval1 + [DisconnectedType()()] * (len(inputs) - 1)
Project: geomdn | Author: afshinrahimi
def detect_nan(i, node, fn):
    for output in fn.outputs:
        if not isinstance(output[0], np.random.RandomState):
            if sp.sparse.issparse(output[0]):
                nans = np.isnan(output[0].data).any()
            else:
                nans = np.isnan(output[0]).any()
            if nans:
                print('*** NaN detected ***')
                theano.printing.debugprint(node)
                print('Inputs : %s' % [input[0] for input in fn.inputs])
                print('Outputs: %s' % [output[0] for output in fn.outputs])
                break
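A sketch of how a hook like detect_nan is usually attached, following the MonitorMode recipe from the Theano documentation (the tiny graph below is purely illustrative):

import theano
import theano.tensor as T

x = T.dscalar('x')
# Call detect_nan after every Apply node executes.
f = theano.function([x], T.log(x),
                    mode=theano.compile.MonitorMode(post_func=detect_nan))
f(-1.0)  # log of a negative number yields NaN, so the hook prints its report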
Project: geomdn | Author: afshinrahimi
def load_toy_data(n_samples=1000, dtype='float32'):
    print('creating Melbourne toy dataset as an inverse problem.')
    print('There are two (if not more) Melbournes, one in Australia and one in Florida, USA')
    mlb_fl_latlon_mean = np.array((28.0836, -80.6081))
    mlb_au_latlon_mean = np.array((-37.8136, 144.9631))
    cov = np.array([[1, 0], [0, 1]])
    # Sample from bivariate Gaussians around each mean (unit variances, zero correlation); there are twice as many Melbourne, AU samples as Melbourne, FL samples.
    mlb_fl_samples = np.random.multivariate_normal(mean=mlb_fl_latlon_mean, cov=cov, size=n_samples).astype(dtype)
    mlb_au_samples = np.random.multivariate_normal(mean=mlb_au_latlon_mean, cov=cov, size=n_samples * 2).astype(dtype)

    # plt.scatter(mlb_fl_samples[:, 0], mlb_fl_samples[:, 1], c='blue', s=1)
    # plt.scatter(mlb_au_samples[:, 0], mlb_au_samples[:, 1], c='red', s=1)
    # plt.show()

    X = sp.sparse.csr_matrix(np.random.uniform(-0.1, 0.1, size=(n_samples * 3, 2)) + np.array([1, 0])).astype(dtype)

    Y = np.vstack((mlb_fl_samples, mlb_au_samples))  
    # shuffle X and Y
    indices = np.arange(n_samples * 3)
    np.random.shuffle(indices)
    X = X[indices]
    Y = Y[indices]
    n_train_samples = 2 * n_samples
    n_dev_samples = n_samples // 2  # integer division so the split sizes stay valid slice indices
    n_test_samples = 3 * n_samples - n_train_samples - n_dev_samples
    X_train = X[0:n_train_samples, :]
    X_dev = X[n_train_samples:n_train_samples + n_dev_samples, :]
    X_test = X[n_train_samples + n_dev_samples:n_train_samples + n_dev_samples + n_test_samples, :]
    Y_train = Y[0:n_train_samples, :]
    Y_dev = Y[n_train_samples:n_train_samples + n_dev_samples, :]
    Y_test = Y[n_train_samples + n_dev_samples:n_train_samples + n_dev_samples + n_test_samples, :]
    U_train = [i for i in range(n_train_samples)]
    U_dev = [i for i in range(n_train_samples, n_train_samples + n_dev_samples)]
    U_test = [i for i in range(n_train_samples + n_dev_samples, n_train_samples + n_dev_samples + n_test_samples)]
    userLocation = {}
    for i in range(0, 3 * n_samples):
        lat, lon = Y[i, :]
        userLocation[i] = str(lat) + ',' + str(lon)
    data = (X_train, Y_train, X_dev, Y_dev, X_test, Y_test, U_train, U_dev, U_test, None, None, userLocation, None)
    return data
Project: geomdn | Author: afshinrahimi
def __init__(self, 
                 n_epochs=10, 
                 batch_size=1000, 
                 regul_coef=1e-6,
                 input_size=None,
                 output_size = None, 
                 hid_size=100, 
                 drop_out=False, 
                 dropout_coef=0.5,
                 early_stopping_max_down=10,
                 dtype='float32',
                 autoencoder=100,
                 input_sparse=False,
                 reload=False,
                 ncomp=100,
                 sqerror=False,
                 dataset_name=''):
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.regul_coef = regul_coef
        self.hid_size = hid_size
        self.drop_out = drop_out
        self.dropout_coef = dropout_coef
        self.early_stopping_max_down = early_stopping_max_down
        self.dtype = dtype
        self.input_size = input_size
        self.output_size = output_size
        self.autoencoder = autoencoder
        self.sparse = input_sparse
        self.reload = reload
        self.n_bigaus_comp = ncomp
        self.sqerror = sqerror
        self.dataset_name = dataset_name
        logging.info('building nn model with hidden size %d, %d bivariate gaussian components and output size %d' % (self.hid_size, self.n_bigaus_comp, self.output_size))
        if self.sqerror:
            self.build_squarederror_regression()
        else:
            self.build()
Project: keras | Author: GeekLiB
def _assert_sparse_module():
    if not th_sparse_module:
        raise ImportError("Failed to import theano.sparse\n"
                          "You probably need to pip install nose-parameterized")
Project: keras | Author: GeekLiB
def concatenate(tensors, axis=-1):
    if py_all([is_sparse(x) for x in tensors]):
        axis = axis % ndim(tensors[0])
        if axis == 0:
            return th_sparse_module.basic.vstack(tensors, format='csr')
        elif axis == 1:
            return th_sparse_module.basic.hstack(tensors, format='csr')
        else:
            raise Exception('Invalid concat axis for sparse matrix: ' + str(axis))
    else:
        return T.concatenate([to_dense(x) for x in tensors], axis=axis)
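The sparse branch above just delegates to the stacking ops in theano.sparse; a minimal sketch of what it reduces to (illustrative variable names, assuming theano.sparse imports cleanly):

import theano.sparse as th_sparse

a = th_sparse.csr_matrix('a')
b = th_sparse.csr_matrix('b')
rows = th_sparse.basic.vstack([a, b], format='csr')   # the axis == 0 case
cols = th_sparse.basic.hstack([a, b], format='csr')   # the axis == 1 case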
Project: deep-learning-keras-projects | Author: jasmeetsb
def _assert_sparse_module():
    if not th_sparse_module:
        raise ImportError("Failed to import theano.sparse\n"
                          "You probably need to pip install nose-parameterized")
Project: deep-learning-keras-projects | Author: jasmeetsb
def concatenate(tensors, axis=-1):
    if py_all([is_sparse(x) for x in tensors]):
        axis = axis % ndim(tensors[0])
        if axis == 0:
            return th_sparse_module.basic.vstack(tensors, format='csr')
        elif axis == 1:
            return th_sparse_module.basic.hstack(tensors, format='csr')
        else:
            raise ValueError('Invalid concat axis for sparse matrix:', axis)
    else:
        return T.concatenate([to_dense(x) for x in tensors], axis=axis)
Project: keras-customized | Author: ambrite
def _assert_sparse_module():
    if not th_sparse_module:
        raise ImportError("Failed to import theano.sparse\n"
                          "You probably need to pip install nose-parameterized")
Project: keras-customized | Author: ambrite
def concatenate(tensors, axis=-1):
    if py_all([is_sparse(x) for x in tensors]):
        axis = axis % ndim(tensors[0])
        if axis == 0:
            return th_sparse_module.basic.vstack(tensors, format='csr')
        elif axis == 1:
            return th_sparse_module.basic.hstack(tensors, format='csr')
        else:
            raise ValueError('Invalid concat axis for sparse matrix:', axis)
    else:
        return T.concatenate([to_dense(x) for x in tensors], axis=axis)
Project: keras | Author: NVIDIA
def _assert_sparse_module():
    if not th_sparse_module:
        raise ImportError("Failed to import theano.sparse\n"
                          "You probably need to pip install nose-parameterized")
Project: keras | Author: NVIDIA
def concatenate(tensors, axis=-1):
    if py_all([is_sparse(x) for x in tensors]):
        axis = axis % ndim(tensors[0])
        if axis == 0:
            return th_sparse_module.basic.vstack(tensors, format='csr')
        elif axis == 1:
            return th_sparse_module.basic.hstack(tensors, format='csr')
        else:
            raise ValueError('Invalid concat axis for sparse matrix:', axis)
    else:
        return T.concatenate([to_dense(x) for x in tensors], axis=axis)
Project: seq2graph | Author: masterkeywikz
def _setup_vars(self, sparse_input):
        '''Setup Theano variables for our network.

        Parameters
        ----------
        sparse_input : bool
            If True, create an input variable that can hold a sparse matrix.
            Defaults to False, which assumes all arrays are dense.

        Returns
        -------
        vars : list of theano variables
            A list of the variables that this network requires as inputs.
        '''
        # x represents our network's input.
        self.x = TT.matrix('x')
        if sparse_input:
            self.x = SS.csr_matrix('x')

        # this variable holds the target outputs for input x.
        self.targets = TT.matrix('targets')

        # the weight array is provided to ensure that different target values
        # are taken into account with different weights during optimization.
        self.weights = TT.matrix('weights')

        if self.weighted:
            return [self.x, self.targets, self.weights]
        return [self.x, self.targets]
Project: seq2graph | Author: masterkeywikz
def _setup_vars(self, sparse_input):
        '''Setup Theano variables for our network.

        Parameters
        ----------
        sparse_input : bool
            If True, create an input variable that can hold a sparse matrix.
            Defaults to False, which assumes all arrays are dense.

        Returns
        -------
        vars : list of theano variables
            A list of the variables that this network requires as inputs.
        '''
        # x represents our network's input.
        self.x = TT.matrix('x')
        if sparse_input:
            self.x = SS.csr_matrix('x')

        # for a classifier, this specifies the correct labels for a given input.
        self.labels = TT.ivector('labels')

        # and the weights are reshaped to be just a vector.
        self.weights = TT.vector('weights')

        if self.weighted:
            return [self.x, self.labels, self.weights]
        return [self.x, self.labels]
Project: keras_superpixel_pooling | Author: parag2489
def _assert_sparse_module():
    if not th_sparse_module:
        raise ImportError("Failed to import theano.sparse\n"
                          "You probably need to pip install nose-parameterized")
Project: keras_superpixel_pooling | Author: parag2489
def concatenate(tensors, axis=-1):
    if py_all([is_sparse(x) for x in tensors]):
        axis = axis % ndim(tensors[0])
        if axis == 0:
            return th_sparse_module.basic.vstack(tensors, format='csr')
        elif axis == 1:
            return th_sparse_module.basic.hstack(tensors, format='csr')
        else:
            raise ValueError('Invalid concat axis for sparse matrix:', axis)
    else:
        return T.concatenate([to_dense(x) for x in tensors], axis=axis)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_dense_from_sparse_sparse_from_dense():
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("local_dense_from_sparse_sparse_from_dense")

    m = theano.tensor.matrix()
    for op in [theano.sparse.csr_from_dense, theano.sparse.csc_from_dense]:
        s = op(m)
        o = theano.sparse.dense_from_sparse(s)
        f = theano.function([m], o, mode=mode)
        # We should just have a deep copy.
        assert len(f.maker.fgraph.apply_nodes) == 1
        f([[1, 2], [3, 4]])