Python theano.tensor module: scalar() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.scalar().
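First, a minimal sketch of the core pattern (this block is illustrative and not taken from any of the projects below): declare a symbolic scalar, build an expression over it, and compile the graph with theano.function.

import theano
import theano.tensor as T

x = T.scalar('x')                 # symbolic scalar, dtype defaults to floatX
n = T.scalar('n', dtype='int32')  # the dtype can also be given explicitly

y = x ** 2 + n                    # symbolic expression over the scalars
f = theano.function([x, n], y)    # compile the graph into a callable
print(f(3.0, 4))                  # prints 13.0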

Project: text2image | Author: emansim
def _build_validate_function(self):
        print 'building validate function'
        t1 = datetime.datetime.now()
        data = self.val_data
        captions = self.val_data_captions

        self._index_im_val = T.vector(dtype='int32') # index to the minibatch
        self._index_cap_val = T.vector(dtype='int32')
        self._cap_len_val = T.scalar(dtype='int32')
        self._validate_function = theano.function(inputs=[self._index_im_val, self._index_cap_val, self._cap_len_val, self._run_steps], 
                                                outputs=[self._kl_final, self._logpxz, self._log_likelihood],
                                                updates=self._updates_train,
                                                givens={
                                                    self._x: data[self._index_im_val],
                                                    self._y: captions[self._index_cap_val,0:self._cap_len_val]
                                                })
        t2 = datetime.datetime.now()
        print (t2-t1)
Project: text2image | Author: emansim
def _build_validate_function(self, isVal=True):
        print 'building validate function'
        t1 = datetime.datetime.now()
        if isVal:
            data = self.val_data
        else:
            data = self.test_data

        self._index_val = T.scalar(dtype='int32') # index to the minibatch
        self._validate_function = theano.function(inputs=[self._index_val, self._run_steps], 
                                                outputs=[self._kl_final, self._logpxz, self._log_likelihood],
                                                updates=self._updates_train,
                                                givens={
                                                    self._x: data[(self._index_val * batch_size):((self._index_val + 1) * batch_size)].astype(floatX)
                                                })
        t2 = datetime.datetime.now()
        print (t2-t1)
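Both text2image snippets above use an int32 scalar as a minibatch index and substitute slices of a shared dataset through givens. A minimal self-contained sketch of that idiom (the dataset shape and batch size here are made up for illustration):

import numpy
import theano
import theano.tensor as T

data = theano.shared(numpy.random.rand(100, 5).astype(theano.config.floatX))
x = T.matrix('x')                  # placeholder for one minibatch
index = T.scalar(dtype='int32')    # index to the minibatch
batch_size = 10

f = theano.function(
    [index], x.sum(),
    givens={x: data[index * batch_size:(index + 1) * batch_size]})
f(0)                               # operates on rows 0..9 of the shared data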
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet50(input_shape=(None, 3, 224, 224))

        if self.verbose: print('Total number of layers:', len(lasagne.layers.get_all_layers(net['prob'])))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_vgg16(input_shape=(None, 3, 224, 224), verbose=self.verbose)
        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet152(input_shape=(None, 3, 224, 224))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def compile_iter_fns(self, *args, **kwargs):

        import theano

        import time
        start=time.time()

        # f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
        self.f_pred = theano.function([self.x, self.mask], self.pred.argmax(axis=1), name='f_pred')

        # f_cost = theano.function([x, mask, y], cost, name='f_cost')
        import theano.tensor as tensor
        grads = tensor.grad(self.cost, wrt=list(self.tparams.values()))
        # f_grad = theano.function([x, mask, y], grads, name='f_grad')

        lr = tensor.scalar(name='lr')

        from theanompi.models.lstm import adadelta
        self.f_grad_shared, self.f_update = adadelta(lr, self.tparams, grads,
                                         self.x, self.mask, self.y, self.cost)

        if self.rank==0: print('compile time %.3f' % (time.time()-start))
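The snippet above hands a symbolic learning-rate scalar to adadelta, so the rate is supplied at call time rather than baked into the graph. A minimal sketch of the same lr-as-scalar pattern, using plain SGD instead of adadelta (illustrative only, not part of Theano-MPI):

import theano
import theano.tensor as T

w = theano.shared(0.0, name='w')   # a single trainable parameter
x = T.scalar('x')
cost = (w * x - 1.0) ** 2          # toy quadratic cost
grad = T.grad(cost, w)

lr = T.scalar(name='lr')           # learning rate chosen per call
f_update = theano.function([x, lr], cost,
                           updates=[(w, w - lr * grad)])
f_update(2.0, 0.1)                 # one SGD step with lr = 0.1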
Project: senti | Author: stevenxxiu
def __init__(self, batch_size, emb_X, lstm_params, output_size):
        super().__init__(batch_size)
        self.inputs = [T.imatrix('input'), T.matrix('mask')]
        self.target = T.matrix('target')
        l = InputLayer((batch_size, None), self.inputs[0])
        l_mask = InputLayer((batch_size, None), self.inputs[1])
        l = EmbeddingLayer(l, emb_X.shape[0], emb_X.shape[1], W=emb_X)
        for lstm_param in lstm_params:
            l = LSTMLayer(
                l, lstm_param, grad_clipping=100, nonlinearity=tanh, mask_input=l_mask, only_return_final=True
            )
        l = DenseLayer(l, output_size, nonlinearity=identity)
        self.pred = get_output(l, deterministic=True)
        self.loss = T.mean(aggregate(squared_error(get_output(l), self.target)))
        params = get_all_params(l, trainable=True)
        self.update_params = [T.scalar('learning_rate')]
        self.updates = rmsprop(self.loss, params, learning_rate=self.update_params[0])
        self.metrics = {'train': [rmse], 'val': [rmse]}
        self.network = l
        self.compile()
Project: ReinforcementLearning | Author: persistforever
def train_one_batch(self):
        self.actions = tensor.vector(name='actions', dtype='int64')
        self.y = tensor.vector(name='y', dtype=theano.config.floatX)
        cost = self.output_vector[self.actions].sum() / self.actions.shape[0]
        coef = (self.y - self.output_vector[self.actions]).sum() / self.actions.shape[0]
        grads = tensor.grad(cost, wrt=self.params.values())
        grads = [coef*t for t in grads]

        lr = tensor.scalar(name='lr')
        f_update = self._adadelta(lr, self.params, grads)

        def update_function(states, actions, y, yita):
            f_update(numpy.array(yita, dtype=theano.config.floatX))
            return

        return update_function
Project: ObjRecPoseEst | Author: paroj
def setupVariables(self):
        floatX = theano.config.floatX  # @UndefinedVariable


        # params
        self.learning_rate = T.scalar('learning_rate',dtype=floatX) 
        self.momentum = T.scalar('momentum',dtype=floatX)

        # input
        self.tvIndex = T.lscalar()  # index to a [mini]batch
        #self.tvIndex.tag.test_value = 10
        self.tvX = self.descrNet.inputVar

        # targets
        self.tvY = T.ivector('y')
        self.tvYr = T.tensor4('yr')
        self.tvPairIdx = T.imatrix('pairIdx')
        self.tvPairLabels = T.ivector('pairLabels')
        self.tvTripletIdx = T.imatrix('tripletIdx')
        self.tvTripletThresh = T.scalar('tripletThresh')
        self.tvTripletPoolIdx = T.imatrix('tripletPoolIdx')
        self.tvTripletPoolThresh = T.scalar('tripletPoolThresh')
        self.tvPosTripletPoolSize = T.iscalar('posTripletPoolSize')
        self.tvNegTripletPoolSize = T.iscalar('negTripletPoolSize')
Project: deep-murasaki | Author: lazydroid
def get_update(Ws_s, bs_s):
    x, fx = train.get_model(Ws_s, bs_s)

    # Ground truth (who won)
    y = T.vector('y')

    # Compute loss (just log likelihood of a sigmoid fit)
    y_pred = sigmoid(fx)
    loss = -( y * T.log(y_pred) + (1 - y) * T.log(1 - y_pred)).mean()

    # Metrics on the number of correctly predicted ones
    frac_correct = ((fx > 0) * y + (fx < 0) * (1 - y)).mean()

    # Updates
    learning_rate_s = T.scalar(dtype=theano.config.floatX)
    momentum_s = T.scalar(dtype=theano.config.floatX)
    updates = train.nesterov_updates(loss, Ws_s + bs_s, learning_rate_s, momentum_s)

    f_update = theano.function(
        inputs=[x, y, learning_rate_s, momentum_s],
        outputs=[loss, frac_correct],
        updates=updates,
        )

    return f_update
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_gpu_elemwise_careduce():
    x = theano.tensor.matrix()
    o = (x * x).sum()
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    data = numpy.random.rand(3, 4).astype(theano.config.floatX)
    utt.assert_allclose(f(data), (data * data).sum())

    o = (x * x).sum(axis=1)
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    utt.assert_allclose(f(data), (data * data).sum(axis=1))
Project: Theano-Deep-learning | Author: GeekLiB
def test_printing_scan():
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    def f_pow2(x_tm1):
        return 2 * x_tm1

    state = theano.tensor.scalar('state')
    n_steps = theano.tensor.iscalar('nsteps')
    output, updates = theano.scan(f_pow2,
                                  [],
                                  state,
                                  [],
                                  n_steps=n_steps,
                                  truncate_gradient=-1,
                                  go_backwards=False)
    f = theano.function([state, n_steps],
                        output,
                        updates=updates,
                        allow_input_downcast=True)
    theano.printing.pydotprint(output, scan_graphs=True)
    theano.printing.pydotprint(f, scan_graphs=True)
Project: Theano-Deep-learning | Author: GeekLiB
def test_output_order_sorted(self):

        '''
        Tests that the output keys are sorted correctly.
        '''

        x = T.scalar('x')
        y = T.scalar('y')
        z = T.scalar('z')
        e1 = T.scalar('1')
        e2 = T.scalar('2')

        f = theano.function([x, y, z, e1, e2], outputs={'x': x, 'y': y, 'z': z,
                                                        '1': e1, '2': e2})

        assert '1' in str(f.outputs[0])
        assert '2' in str(f.outputs[1])
        assert 'x' in str(f.outputs[2])
        assert 'y' in str(f.outputs[3])
        assert 'z' in str(f.outputs[4])
Project: Theano-Deep-learning | Author: GeekLiB
def test_output_list_still_works(self):

        '''
        Test that theano.function works if outputs is a list.
        '''

        x = T.scalar('x')

        f = theano.function([x], outputs=[x * 3, x * 2, x * 4, x])

        result = f(5.0)

        assert result[0] == 15.0
        assert result[1] == 10.0
        assert result[2] == 20.0
        assert result[3] == 5.0
Project: Theano-Deep-learning | Author: GeekLiB
def test_debug_mode_dict(self):

        '''
        Tests that debug mode works where outputs is a dictionary.
        '''

        x = T.scalar('x')

        f = theano.function([x], outputs={'1': x, '2': 2 * x,
                                          '3': 3 * x}, mode="DEBUG_MODE")

        result = f(3.0)

        assert result['1'] == 3.0
        assert result['2'] == 6.0
        assert result['3'] == 9.0
Project: Theano-Deep-learning | Author: GeekLiB
def test_key_string_requirement(self):
        '''
        Tests that an exception is thrown if a non-string key is used in
        the outputs dictionary.
        '''
        x = T.scalar('x')
        try:
            theano.function([x], outputs={1.0: x})
            raise Exception("Did not throw exception with 1.0 as only key")
        except AssertionError:
            pass
        try:
            theano.function([x], outputs={1.0: x, "a": x**2})
            raise Exception("Did not throw exception with 1.0 as one key")
        except AssertionError:
            pass
        try:
            theano.function([x], outputs={(1, "b"): x, 1.0: x**2})
            raise Exception("Did not throw exception with tuple as key")
        except AssertionError:
            pass
Project: Theano-Deep-learning | Author: GeekLiB
def test_jacobian_disconnected_inputs():
    """
    Test that disconnected inputs are properly handled by jacobian.
    """
    v1 = tensor.vector()
    v2 = tensor.vector()
    jacobian_v = theano.gradient.jacobian(1 + v1, v2, disconnected_inputs='ignore')
    func_v = theano.function([v1, v2], jacobian_v)
    val = numpy.arange(4.0).astype(theano.config.floatX)
    assert numpy.allclose(func_v(val, val), numpy.zeros((4, 4)))

    s1 = tensor.scalar()
    s2 = tensor.scalar()
    jacobian_s = theano.gradient.jacobian(1 + s1, s2, disconnected_inputs='ignore')
    func_s = theano.function([s2], jacobian_s)
    val = numpy.array(1.0).astype(theano.config.floatX)
    assert numpy.allclose(func_s(val), numpy.zeros(1))
Project: Theano-Deep-learning | Author: GeekLiB
def make_node(self, x, index):
        x = as_sparse_variable(x)
        assert x.format in ["csr", "csc"]
        assert len(index) == 2

        input_op = [x]

        for ind in index:

            if isinstance(ind, slice):
                raise Exception("GetItemScalar called with a slice as index!")

            # in case of indexing using int instead of theano variable
            elif isinstance(ind, integer_types):
                ind = theano.tensor.constant(ind)
                input_op += [ind]

            # in case of indexing using theano variable
            elif ind.ndim == 0:
                input_op += [ind]
            else:
                raise NotImplementedError()

        return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)])
Project: Theano-Deep-learning | Author: GeekLiB
def make_node(self, x, y):
        x, y = as_sparse_variable(x), tensor.as_tensor_variable(y)

        assert x.format in ["csr", "csc"]

        # Upcast the tensor. Is the equivalent cast for sparse matrices implemented?
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)

        # The magic number two here arises because L{scipy.sparse}
        # objects must be matrices (have dimension 2)
        # Broadcasting of the sparse matrix is not supported.
        # We support nd == 0 used by grad of SpSum()
        assert y.type.ndim in [0, 2]
        out = SparseType(dtype=dtype,
                         format=x.type.format)()
        return gof.Apply(self, [x, y], [out])
Project: Theano-Deep-learning | Author: GeekLiB
def structured_monoid(tensor_op):
    # Generic operation to perform many kinds of monoid element-wise
    # operations on the non-zeros of a sparse matrix.

    # The first parameter must always be a sparse matrix. The other parameters
    # must be scalars which will be passed as argument to the tensor_op.

    def decorator(f):
        def wrapper(*args):
            x = as_sparse_variable(args[0])
            assert x.format in ["csr", "csc"]

            xs = [scalar.as_scalar(arg) for arg in args[1:]]

            data, ind, ptr, shape = csm_properties(x)

            data = tensor_op(data, *xs)

            return CSM(x.format)(data, ind, ptr, shape)
        wrapper.__name__ = str(tensor_op.scalar_op)
        return wrapper
    return decorator
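The sparse machinery aside, structured_monoid follows the decorator-factory pattern: a function that, given an operation, returns a decorator that returns a wrapper. A self-contained illustration of just that shape (unrelated to Theano's sparse internals):

def scale_result(factor):
    # Decorator factory: returns a decorator that multiplies f's result.
    def decorator(f):
        def wrapper(*args):
            return f(*args) * factor
        wrapper.__name__ = f.__name__
        return wrapper
    return decorator

@scale_result(2)
def add(a, b):
    return a + b

add(1, 2)  # -> 6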
Project: Theano-Deep-learning | Author: GeekLiB
def make_node(self, a, b):

        a = as_sparse_variable(a)
        assert a.format in ["csr", "csc", "bsr"]

        if not _is_sparse_variable(a):
            raise TypeError('First argument must be of type SparseVariable '
                            'or SparseConstant')
        dtype_out = scalar.upcast(a.type.dtype, b.type.dtype)
        if b.type.ndim != 2:
            raise NotImplementedError('non-matrix b')

        if _is_sparse_variable(b):
            return gof.Apply(self, [a, b],
                             [SparseType(a.type.format, dtype_out)()])
        else:
            return gof.Apply(self, [a, b],
                             [tensor.tensor(dtype_out,
                                            (False, b.type.broadcastable[1]))])
Project: Theano-Deep-learning | Author: GeekLiB
def perform(self, node, inputs, outputs):
        (a_indices, a_indptr, b, g_ab) = inputs
        (out,) = outputs
        g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
        for j in xrange(len(a_indptr) - 1):
            ind0 = a_indptr[j]
            ind1 = a_indptr[j + 1]
            for i_idx in xrange(ind0, ind1):
                i = a_indices[i_idx]
                # Depending on the type of g_ab and b (sparse or dense),
                # the following dot product can result in a scalar or
                # a (1, 1) sparse matrix.
                dot_val = numpy.dot(g_ab[i], b[j].T)
                if isinstance(dot_val, scipy.sparse.spmatrix):
                    dot_val = dot_val[0, 0]
                g_a_data[i_idx] = dot_val
        out[0] = g_a_data
Project: Theano-Deep-learning | Author: GeekLiB
def make_node(self, alpha, x, y, z):
        if not _is_sparse_variable(x) and not _is_sparse_variable(y):
            # If x and y are tensors, we don't want to use this class
            # We should use Dot22 and Gemm in that case.
            raise TypeError(x)

        dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
                                  y.type.dtype, z.type.dtype)
        alpha = tensor.as_tensor_variable(alpha)
        z = tensor.as_tensor_variable(z)

        assert z.ndim == 2
        assert alpha.type.broadcastable == (True,) * alpha.ndim
        if not _is_sparse_variable(x):
            x = tensor.as_tensor_variable(x)
            assert y.format in ["csr", "csc"]
            assert x.ndim == 2
        if not _is_sparse_variable(y):
            y = tensor.as_tensor_variable(y)
            assert x.format in ["csr", "csc"]
            assert y.ndim == 2

        return gof.Apply(self, [alpha, x, y, z],
                         [tensor.tensor(dtype=dtype_out,
                                        broadcastable=(False, False))])
Project: Theano-Deep-learning | Author: GeekLiB
def test_c(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")

        for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.add, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.mul, dtype=dtype)
        for dtype in ["floatX", "int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.minimum, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.maximum, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype,
                             tensor_op=tensor.all)
            self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype,
                             tensor_op=tensor.any)
        for dtype in ["int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.xor, dtype=dtype)
Project: Theano-Deep-learning | Author: GeekLiB
def test_infer_shape(self, dtype=None, pre_scalar_op=None):
        if dtype is None:
            dtype = theano.config.floatX
        for xsh, tosum in self.cases:
            x = self.type(dtype, [(entry == 1) for entry in xsh])('x')
            if pre_scalar_op is not None:
                x = pre_scalar_op(x)
            if tosum is None:
                tosum = list(range(len(xsh)))
            xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
            d = {}
            if pre_scalar_op is not None:
                xv = x.eval({x.owner.inputs[0]: xv})
                d = {pre_scalar_op: pre_scalar_op}
            self._compile_and_check([x],
                                    [self.op(scalar.add, axis=tosum, *d)(x)],
                                    [xv], self.op,
                                    ["local_cut_useless_reduce"],
                                    warn=0 not in xsh)
Project: Theano-Deep-learning | Author: GeekLiB
def setUp(self):
        self.test_vals = [numpy.array(x, dtype=config.floatX) for x in [
            0,
            1,
            numpy.nan,
            numpy.inf,
            -numpy.inf,
            [numpy.nan, numpy.inf, -numpy.inf, 0, 1, -1],
            ]]
        self.scalar = tensor.scalar()
        self.vector = tensor.vector()
        self.mode = get_default_mode()
        if isinstance(self.mode, theano.compile.debugmode.DebugMode):
            # Disable the check preventing usage of NaN / Inf values.
            self.mode = copy(self.mode)
            self.mode.check_isfinite = False
Project: Theano-Deep-learning | Author: GeekLiB
def test_mean_default_dtype(self):
        """
        Test the default dtype of a mean().
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [], [0], [1], [0, 1]]
        for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):
            axis = axes[idx % len(axes)]
            x = tensor.matrix(dtype=dtype)
            m = x.mean(axis=axis)
            if dtype in tensor.discrete_dtypes and axis != []:
                assert m.dtype == 'float64'
            else:
                assert m.dtype == dtype, (m, m.dtype, dtype)
            f = theano.function([x], m)
            data = numpy.random.rand(3, 4) * 10
            data = data.astype(dtype)
            f(data)
Project: Theano-Deep-learning | Author: GeekLiB
def test_prod_without_zeros_default_dtype(self):
        """
        Test the default dtype of a ProdWithoutZeros().
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [], [0], [1], [0, 1]]
        for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):
            axis = axes[idx % len(axes)]
            x = ProdWithoutZeros(axis=axis)(tensor.matrix(dtype=dtype))
            assert x.dtype == dict(
                int8='int64',
                int16='int64',
                int32='int64',
                uint8='uint64',
                uint16='uint64',
                uint32='uint64',
            ).get(dtype, dtype)
Project: Theano-Deep-learning | Author: GeekLiB
def test_prod_without_zeros_custom_dtype(self):
        """
        Test ability to provide your own output dtype for a ProdWithoutZeros().
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [], [0], [1], [0, 1]]
        idx = 0
        for input_dtype in imap(str, theano.scalar.all_types):
            x = tensor.matrix(dtype=input_dtype)
            for output_dtype in imap(str, theano.scalar.all_types):
                axis = axes[idx % len(axes)]
                prod_woz_var = ProdWithoutZeros(
                        axis=axis, dtype=output_dtype)(x)
                assert prod_woz_var.dtype == output_dtype
                idx += 1
                if ('complex' in output_dtype or
                    'complex' in input_dtype):
                    continue
                f = theano.function([x], prod_woz_var)
                data = numpy.random.rand(2, 3) * 3
                data = data.astype(input_dtype)
                f(data)
Project: Theano-Deep-learning | Author: GeekLiB
def test_infer_shape(self):

        for s_left, s_right in [((5, 6), (5, 6)),
                                ((5, 6), (5, 1)),
                                ((5, 6), (1, 6)),
                                ((5, 1), (5, 6)),
                                ((1, 6), (5, 6)),
                                ((2, 3, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 5), (2, 3, 1, 5)),
                                ((2, 3, 4, 5), (1, 3, 4, 5)),
                                ((2, 1, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 1), (2, 3, 4, 5))]:
            dtype = theano.config.floatX
            t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
            t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
            t_left_val = numpy.zeros(s_left, dtype=dtype)
            t_right_val = numpy.zeros(s_right, dtype=dtype)
            self._compile_and_check([t_left, t_right],
                            [Elemwise(scalar.add)(t_left, t_right)],
                            [t_left_val, t_right_val], Elemwise)
Project: Theano-Deep-learning | Author: GeekLiB
def test_kording_bug(self):
        x, y = vectors('xy')
        eps = scalar('eps')
        s = scalar('s')

        #r = theano.tensor.mul(theano.tensor.fill(x, 2.*a), x/a , (y+z) , a)
        #r = theano.tensor.mul((x/a+y) , a, z)
        r = tensor.mul(s - 1,
                       eps + x / s,
                       eps + y / s,
                       s)

        f = function([s, eps, x, y], r ** 2)

        s_val = numpy.asarray(4, dtype=config.floatX)
        eps_val = numpy.asarray(1.e-6, dtype=config.floatX)
        x_val = numpy.asarray([1.5, 2], dtype=config.floatX)
        y_val = numpy.asarray([2.3, 3.1], dtype=config.floatX)

        r0 = f(s_val, eps_val, x_val, y_val)
        r1 = f(s_val, eps_val, x_val, y_val)
        r2 = f(s_val, eps_val, x_val, y_val)

        assert numpy.all(r0 == r1)
        assert numpy.all(r0 == r2)
Project: Theano-Deep-learning | Author: GeekLiB
def test1(self):
        # basic test that the optimization works with a broadcasted scalar
        x = tensor.matrix('x')
        y = tensor.scalar('y')
        z = tensor.matrix('z')
        f = function([x, y, z], tensor.exp(x + y + z)[0], mode=mode_opt)

        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(f, ops_to_check=[
                Subtensor, tensor.DimShuffle]))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.Subtensor)
        assert isinstance(prog[1].op, tensor.DimShuffle)
        assert isinstance(prog[2].op, tensor.Subtensor)
        assert isinstance(prog[3].op.scalar_op,
                          theano.scalar.Composite)  # Composite{add,add}
        assert len(prog) == 4
        f([[0, 1], [2, 3]], 4, [[4, 5], [6, 7]])  # let debugmode test something
Project: Theano-Deep-learning | Author: GeekLiB
def test5(self):
        # test that we don't lift when we reuse the output of the
        # elemwise for other computation.
        x = tensor.matrix('x')
        y = tensor.vector('y')
        f = function([x, y], [tensor.exp(x + y)[0], tensor.exp(x + y) + x],
                     mode=mode_opt)

        # Opt doesn't apply, so no need for check_stack_trace
        # self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.DimShuffle)
        assert isinstance(prog[1].op.scalar_op,
                          theano.scalar.Composite)  # Composite{add,exp}
        assert prog[2].op == tensor.add
        assert isinstance(prog[3].op, tensor.Subtensor)  # first subtensor
        assert len(prog) == 4
        f([[0, 1], [2, 3]], [4, 5])  # let debugmode test something
Project: Theano-Deep-learning | Author: GeekLiB
def test6(self):
        # basic test that the optimization works with a scalar as input,
        # and a scalar as output (no broadcasting of the scalar needed).
        # The optimization used to fail and display an ERROR message.

        x = tensor.vector('x')
        y = tensor.scalar('y')
        f = function([x, y], tensor.exp(x + y)[0], mode=mode_opt)

        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.Subtensor)
        # Composite{add,exp}
        assert isinstance(prog[1].op.scalar_op, theano.scalar.Composite)
        assert len(prog) == 2
        f([1, 2, 3], 4)  # let debugmode test something
Project: Theano-Deep-learning | Author: GeekLiB
def test_inequality_with_self(self):
        x = T.scalar('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')

        f = theano.function([x], T.lt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.le(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.gt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.minimum(x, x), mode=mode)
        self.assert_identity(f)

        f = theano.function([x], T.maximum(x, x), mode=mode)
        self.assert_identity(f)
Project: Theano-Deep-learning | Author: GeekLiB
def test_and(self):
        mode = theano.compile.get_default_mode().including('canonicalize')

        x = T.scalar('x', dtype='int8')

        for zero, one in [(numpy.int8(0), numpy.int8(1)), (0, 1)]:
            f = theano.function([x], T.and_(x, zero), mode=mode)
            self.assert_eqs_const(f, 0)

            f = theano.function([x], T.and_(zero, x), mode=mode)
            self.assert_eqs_const(f, 0)

            f = theano.function([x], T.and_(x, one), mode=mode)
            if f.outputs[0].variable.dtype == x.dtype:
                self.assert_identity(f)

            f = theano.function([x], T.and_(one, x), mode=mode)
            if f.outputs[0].variable.dtype == x.dtype:
                self.assert_identity(f)
Project: Theano-Deep-learning | Author: GeekLiB
    def test_local_remove_useless_assert2(self):
        # remove assert conditions that are always true
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode)

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 1),
                            mode=mode)
        assert f(1, 1) == 1
        assert f(5, 1) == 5
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert len(topo[0].inputs) == 2
        assert topo[1].op == deep_copy_op
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_remove_useless_assert3(self):
        # don't remove assert conditions that are always false
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode)

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 0),
                            mode=mode)
        self.assertRaises(AssertionError, f, 1, 0)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 2
        assert len(topo[0].inputs) == 3
        assert topo[1].op == deep_copy_op
Project: Theano-Deep-learning | Author: GeekLiB
def test_eq(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x, y], T.eq(x, y), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx, vy)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.Elemwise)
        assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)
        f2 = theano.function([x], T.eq(x, x), mode=self.mode)
        assert numpy.all(f2(vx) == numpy.ones((5, 4)))
        topo2 = f2.maker.fgraph.toposort()
        # Shape_i{1}(<TensorType(float64, matrix)>), Shape_i{0}(<TensorType(float64, matrix)>), Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0
        assert len(topo2) == 3
        assert isinstance(topo2[-1].op, T.Alloc)
Project: Theano-Deep-learning | Author: GeekLiB
def test_neq(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x, y], T.neq(x, y), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx, vy)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.Elemwise)
        assert isinstance(topo[0].op.scalar_op, theano.scalar.NEQ)
        f2 = theano.function([x], T.neq(x, x), mode=self.mode)
        assert numpy.all(f2(vx) == numpy.zeros((5, 4)))
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 3
        assert isinstance(topo2[-1].op, T.Alloc)
Project: Theano-Deep-learning | Author: GeekLiB
def test_mul(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x], T.mul(x), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == deep_copy_op
        f2 = theano.function([x, y], T.mul(x, y), mode=self.mode)
        assert numpy.all(f2(vx, vy) == vx * vy)
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 1
        assert isinstance(topo2[0].op, T.Elemwise)
        assert isinstance(topo2[0].op.scalar_op, theano.scalar.Mul)
Project: Theano-Deep-learning | Author: GeekLiB
def test_add(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x], T.add(x), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == deep_copy_op
        f2 = theano.function([x, y], T.add(x, y), mode=self.mode)
        assert numpy.all(f2(vx, vy) == vx + vy)
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 1
        assert isinstance(topo2[0].op, T.Elemwise)
        assert isinstance(topo2[0].op.scalar_op, theano.scalar.Add)
Project: Theano-Deep-learning | Author: GeekLiB
def test_broadcast2(self):
        # test switch(cst, vector, matrix)

        # This case is not optimized for now.
        x = theano.tensor.vector('x', dtype='int32')
        y = theano.tensor.matrix('y', dtype='int64')
        z = theano.tensor.switch(1, x, y)
        f = theano.function([x, y], z, mode=self.mode)
        assert len([node.op for node in f.maker.fgraph.toposort() if
                    isinstance(node.op, theano.tensor.Elemwise) and
                    not isinstance(node.op.scalar_op, theano.scalar.basic.Cast)]) == 0
        vx = numpy.array([4, 5, 6], dtype='int32')
        vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')
        assert numpy.all(f(vx, vy) == vx)

        z = theano.tensor.switch(0, x, y)
        f = theano.function([x, y], z, mode=self.mode)
        assert len([node.op for node in f.maker.fgraph.toposort() if
                    isinstance(node.op, theano.tensor.Elemwise)]) == 0
        vx = numpy.array([4, 5, 6], dtype='int32')
        vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')
        assert numpy.all(f(vx, vy) == vy)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_add_specialize():
    # test of non-zero dimension
    a = tensor.vector()
    s = tensor.add(tensor.zeros_like(a))
    assert local_add_specialize.transform(s.owner)

    # test of 0-d
    a = tensor.scalar()
    s = tensor.add(tensor.zeros_like(a))
    assert local_add_specialize.transform(s.owner)

    # Test when the 0 input is forcing upcasting
    a = tensor.constant(0, dtype='int64')
    b = tensor.constant(1, dtype='int32')
    s = a + b
    transformed = local_add_specialize.transform(s.owner)
    assert transformed
    assert transformed[0].type == s.type
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_scalar_tensor_scalar():
    dtypes = ['int8', 'int16', 'int32', 'int64',
              'uint8', 'uint16', 'uint32', 'uint64',
              'float32', 'float64',
              'complex64', 'complex128']

    for dtype in dtypes:
        s_type = theano.scalar.Scalar(dtype=dtype)
        s = s_type()
        t = tensor.tensor_from_scalar(s)
        s2 = tensor.scalar_from_tensor(t)

        f = function([s], s2, mode=mode_opt)
        e = f.maker.fgraph.toposort()
        cast_nodes = [n for n in e
                if isinstance(n.op, (tensor.TensorFromScalar,
                                     tensor.ScalarFromTensor))]
        assert len(cast_nodes) == 0
        f(0)
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_zero_div():
    """Tests 0/x -> 0"""
    mode = theano.compile.mode.get_default_mode().including("local_zero_div")
    for t in (T.scalar, T.ivector, T.ftensor4):
        x = t('x')
        for op in (T.int_div, T.true_div):
            y = op(0, x)
            g = optimize(FunctionGraph([x], [y]))
            # the division should be gone
            divs = [node for node in g.toposort()
                    if isinstance(node.op, T.elemwise.Elemwise) and
                    isinstance(node.op.scalar_op, type(op.scalar_op))]
            assert len(divs) == 0
            # the output type should match the unoptimized one
            output = g.outputs[0]
            assert output.ndim == y.ndim
            assert output.type == y.type
            # and the output should be zero
            assert theano.tensor.get_scalar_constant_value(output) == 0
Project: Theano-Deep-learning | Author: GeekLiB
def test_perform(self):
        x = tensor.matrix()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        for shp in [(8, 8), (5, 8), (8, 5)]:
            a = numpy.random.rand(*shp).astype(config.floatX)
            val = numpy.cast[config.floatX](numpy.random.rand())
            out = f(a, val)
            # We can't use numpy.fill_diagonal as it is bugged.
            assert numpy.allclose(numpy.diag(out), val)
            assert (out == val).sum() == min(a.shape)

        # test for 3d tensor
        a = numpy.random.rand(3, 3, 3).astype(config.floatX)
        x = tensor.tensor3()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        val = numpy.cast[config.floatX](numpy.random.rand() + 10)
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert out[0, 0, 0] == val
        assert out[1, 1, 1] == val
        assert out[2, 2, 2] == val
        assert (out == val).sum() == min(a.shape)
Project: fxnn | Author: khaotik
def build_model(model_):
    global fn_predict, fn_record
    global g_ozer, g_mdl

    g_ozer = dict(simple=VanillaSGD, adam=AdamSGD)[OZER]()
    g_ozer.lr = LEARN_RATE

    s_x = T.tensor4('x')
    s_y = T.ivector('y')
    s_pdpo = T.scalar()
    s_out = model_(s_x, s_pdpo)

    s_y_onehot = T.extra_ops.to_one_hot(s_y, len(g_dataset.label_map))
    s_loss = T.mean(-s_y_onehot*T.log(s_out + 1e-3))
    s_accr = T.mean( T.switch(
            T.eq(T.argmax(s_out, axis=1), T.argmax(s_y_onehot, axis=1)), 1, 0))

    no_dropout = [(s_pdpo, T.constant(0., dtype=th.config.floatX))]
    fn_predict = th.function(
        [s_x, s_y],
        {'pred':s_out, 'accr':s_accr, 'loss':s_loss},
        givens=no_dropout, profile=PROFILE)
    rec_fetches = {
        'x': s_x, 'y': s_y,
        'pred': s_out}
    rec_fetches.update(g_mdl.params_di)
    fn_record = th.function(
        [s_x, s_y], rec_fetches, givens=no_dropout, profile=PROFILE)
    g_ozer.compile(
        [s_x, s_y],
        s_loss,
        g_mdl.params_di.values(),
        fetches_={'pred': s_out, 'loss': s_loss, 'accr': s_accr},
        givens_=[(s_pdpo, T.constant(TRAIN_PDPO, dtype=th.config.floatX))],
        profile_=PROFILE)
Project: cortex | Author: rdevon
def step_infer(self, r, q, y, *params):
        '''Step inference function for IRVI.inference scan.

        Args:
            r: theano randomstream variable
            q: T.tensor. Current approximate posterior parameters
            y: T.tensor. Data sample
            params: list of shared variables
        Returns:
            q: T.tensor. New approximate posterior parameters
            cost: T.scalar float. Negative lower bound of current parameters
        '''

        model = self.model
        prior_params = model.get_prior_params(*params)

        h        = (r <= q[None, :, :]).astype(floatX)
        py       = model.p_y_given_h(h, *params)
        log_py_h = -model.conditional.neg_log_prob(y[None, :, :], py)
        log_ph   = -model.prior.step_neg_log_prob(h, *prior_params)
        log_qh   = -model.posterior.neg_log_prob(h, q[None, :, :])
        log_p     = log_py_h + log_ph - log_qh
        w_tilde = get_w_tilde(log_p)
        cost    = -log_p.mean()
        q_ = (w_tilde[:, :, None] * h).sum(axis=0)
        q  = self.inference_rate * q_ + (1 - self.inference_rate) * q
        return q, cost
Project: cortex | Author: rdevon
def set_optimizer(inputs, cost, tparams, constants, updates, extra_outs,
                  optimizer='sgd', optimizer_args=None,
                  **learning_args):
    '''Sets the parameter update functions with optimizer.

    Args:
        inputs (T.tensor): input variables.
        cost (T.scalar): cost.
        tparams (OrderedDict): dictionary of tensor parameters.
        constants (list): list of constant tensors.
        updates (theano.OrderedUpdates): updates.
        extra_outs (list): list of extra output tensors.
        optimizer (Optional[str]): optimizer string. See `utils.op` for details.
            Defaults to `sgd`.
        optimizer_args (Optional[dict]): optional arguments for optimizer.
        **learning_args: extra learning keyword arguments; not consumed here, returned as-is.

    Returns:
        theano.function: gradient function.
        theano.function: update function.
        dict: extra learning keyword arguments.

    '''

    if optimizer_args is None:
        optimizer_args = dict()
    grads = T.grad(cost, wrt=itemlist(tparams),
                   consider_constant=constants)

    updates = theano.OrderedUpdates(updates)

    lr = T.scalar(name='lr')
    f_grad_shared, f_grad_updates = eval('op.' + optimizer)(
        lr, tparams, grads, inputs, cost, extra_ups=updates,
        extra_outs=extra_outs, **optimizer_args)

    return f_grad_shared, f_grad_updates, learning_args