Python theano.tensor module: any() example source code

We have extracted the following 41 code examples from open-source Python projects to illustrate how to use theano.tensor.any().
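
Before the project examples, here is a minimal, self-contained sketch of what theano.tensor.any() computes (the toy matrix and variable names below are ours, not from any of the projects):

import numpy as np
import theano
import theano.tensor as T

# Reduce a symbolic integer matrix with T.any along axis 1:
# the result is an int8 vector with a 1 for every row that
# contains at least one nonzero entry.
x = T.imatrix('x')
row_has_nonzero = T.any(x, axis=1)
f = theano.function([x], row_has_nonzero)

print(f(np.array([[0, 0], [0, 3]], dtype='int32')))  # -> [0 1]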

Project: keras    Author: GeekLiB
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result
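
To exercise this backend helper in isolation, the sketch below restates it together with the two module-level names it relies on (`equal` and `any`, which the Keras backend binds to T.eq and T.any); the toy batch is ours:

import numpy as np
import theano
import theano.tensor as T

equal, any = T.eq, T.any  # module-level bindings, mirroring the backend file

def in_top_k(predictions, targets, k):
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda p, t: any(equal(p, t)),
                           sequences=[predictions_top_k, targets])
    return result

preds = T.fmatrix('preds')
tgts = T.ivector('tgts')
f = theano.function([preds, tgts], in_top_k(preds, tgts, 2))

p = np.array([[0.1, 0.5, 0.4],
              [0.8, 0.1, 0.1]], dtype='float32')
t = np.array([2, 1], dtype='int32')
print(f(p, t))  # -> [1 0]: class 2 is in the top-2 of row 0, class 1 is not in row 1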


# CONVOLUTIONS
Project: deep-learning-keras-projects    Author: jasmeetsb
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    """
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS
Project: keras-customized    Author: ambrite
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS
Project: keras    Author: NVIDIA
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    """
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS
Project: Theano-Deep-learning    Author: GeekLiB
def test_c(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")

        for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.add, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.mul, dtype=dtype)
        for dtype in ["floatX", "int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.minimum, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.maximum, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype,
                             tensor_op=tensor.all)
            self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype,
                             tensor_op=tensor.any)
        for dtype in ["int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.xor, dtype=dtype)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_reduce_broadcast_some_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, False, True))()
            f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)

            order = f.maker.fgraph.toposort()
            assert 1 == sum([isinstance(node.op, T.CAReduce)
                             for node in order])

            node = [node for node in order if isinstance(node.op,
                                                         tensor.CAReduce)][0]

            op = node.op
            assert isinstance(op, T.CAReduce)
            # -- the leading broadcastable dimension has been dropped
            #   by the local_reduce_broadcastable optimization
            #   now summation is over the original x's dimension 1.
            assert node.inputs[0].ndim == 2, node
            assert op.axis == (0,), op.axis
Project: keras    Author: GeekLiB
def any(x, axis=None, keepdims=False):
    '''Logical OR reduction.
    '''
    return T.any(x, axis=axis, keepdims=keepdims)
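
A quick illustration of the wrapper's semantics (the values below are ours): with keepdims=True the reduced axis is retained as a length-1 dimension.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], T.any(x, axis=0, keepdims=True))
print(f(np.array([[0., 1.],
                  [0., 0.]], dtype=theano.config.floatX)))
# -> [[0 1]], shape (1, 2): column 1 has a nonzero entry, column 0 does not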
Project: keraflow    Author: ipod825
def any(self, x, axis=None, keepdims=False):
        '''Logical OR reduction.
        '''
        return T.any(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects    Author: jasmeetsb
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).
    """
    return T.any(x, axis=axis, keepdims=keepdims)
Project: keras-customized    Author: ambrite
def any(x, axis=None, keepdims=False):
    '''Logical OR reduction.
    '''
    return T.any(x, axis=axis, keepdims=keepdims)
Project: reading-text-in-the-wild    Author: mathDR
def any(x, axis=None, keepdims=False):
    '''Logical OR reduction.
    '''
    return T.any(x, axis=axis, keepdims=keepdims)
Project: keras    Author: NVIDIA
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).
    """
    return T.any(x, axis=axis, keepdims=keepdims)
Project: nature_methods_multicut_pipeline    Author: ilastik
def feedforward(self, inp=None):

        # Instantiate ghost parameters if there are any
        self.instantiate()

        # Return the input unchanged if the train is empty
        if not self.train:
            self.y = inp
            return inp

        # Parse inp. If given, set the input of the first layer of the train
        # and that of the model (superclass) to inp.
        if inp is not None:
            self.train[0].x = inp
            self.x = inp
        else:
            # Assume the model input has been set.
            self.train[0].x = self.x

        # Feed forward through hidden layers
        for layernum in range(len(self) - 1):
            self.train[layernum + 1].x = self.train[layernum].feedforward()

        # Feed forward through the last (output) layer and assign to y of the model
        self.y = self.train[-1].feedforward()

        # There might be new update requests to fetch
        self.rebuildupdaterequestlist()

        # return
        return self.y

    # Define decoder feed forward method
Project: nature_methods_multicut_pipeline    Author: ilastik
def feedforward(self, inp=None):

        # Instantiate ghost parameters if there are any
        self.instantiate()

        # Return the input unchanged if the train is empty
        if not self.train:
            self.y = inp
            return inp

        # Parse inp. If given, set the input of the first layer of the train
        # and that of the model (superclass) to inp.
        if inp is not None:
            self.train[0].x = inp
            self.x = inp
        else:
            # Assume the model input has been set.
            self.train[0].x = self.x

        # Feed forward through hidden layers
        for layernum in range(len(self) - 1):
            self.train[layernum + 1].x = self.train[layernum].feedforward()

        # Feed forward through the last (output) layer and assign to y of the model
        self.y = self.train[-1].feedforward()

        # There might be new update requests to fetch
        self.rebuildupdaterequestlist()

        # return
        return self.y

    # Step method is an alias for feedforward (required for use in recurrent chains)
Project: keras_superpixel_pooling    Author: parag2489
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).
    """
    return T.any(x, axis=axis, keepdims=keepdims)
Project: Theano-Deep-learning    Author: GeekLiB
def test_perform(self):
        for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
            self.with_linker(gof.PerformLinker(), scalar.add, dtype=dtype)
            self.with_linker(gof.PerformLinker(), scalar.mul, dtype=dtype)
            self.with_linker(gof.PerformLinker(), scalar.maximum, dtype=dtype)
            self.with_linker(gof.PerformLinker(), scalar.minimum, dtype=dtype)
            self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype,
                             tensor_op=tensor.all)
            self.with_linker(gof.PerformLinker(), scalar.or_, dtype=dtype,
                             tensor_op=tensor.any)
        for dtype in ["int8", "uint8"]:
            self.with_linker(gof.PerformLinker(), scalar.or_, dtype=dtype)
            self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype)
            self.with_linker(gof.PerformLinker(), scalar.xor, dtype=dtype)
Project: Theano-Deep-learning    Author: GeekLiB
def test_perform_nan(self):
        for dtype in ["floatX", "complex64", "complex128"]:
            self.with_linker(gof.PerformLinker(), scalar.add, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.mul, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.maximum, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.minimum, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.or_, dtype=dtype,
                             test_nan=True, tensor_op=tensor.any)
            self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype,
                             test_nan=True, tensor_op=tensor.all)
Project: Theano-Deep-learning    Author: GeekLiB
def test_any_grad(self):
        x = tensor.bmatrix('x')
        x_all = x.any()
        gx = theano.grad(x_all, x)
        f = theano.function([x], gx)
        x_random = self.rng.binomial(n=1, p=0.5, size=(5, 7)).astype('int8')
        for x_val in (x_random,
                      numpy.zeros_like(x_random),
                      numpy.ones_like(x_random)):
            gx_val = f(x_val)
            assert gx_val.shape == x_val.shape
            assert numpy.all(gx_val == 0)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_IncSubtensor_serialize():
    d = numpy.random.normal(0, 0.01, size=(100, 100))
    d = d.astype(theano.config.floatX)

    W = theano.shared(d, name='W')
    i = T.vector('i', dtype='int64')
    j = T.vector('j', dtype='int64')
    t = T.scalar('t')
    if theano.tensor.subtensor.inplace_increment:
        y = (W[i] + W[j] + W[1] + W[i, j]).sum()
    else:
        y = (W[i] + W[j] + W[1]).sum()
    cost = T.sqr(t - y)
    dW = theano.grad(cost, W)
    mode = theano.compile.mode.get_default_mode().excluding('fusion')
    mode = mode.including("local_IncSubtensor_serialize")
    f = theano.function([i, j, t], updates=[(W, W - 0.01 * dW)], mode=mode)
    topo = f.maker.fgraph.toposort()
    adds = [n for n in topo if isinstance(n.op, T.Elemwise) and
            isinstance(n.op.scalar_op, theano.scalar.Add)]
    for a in adds:
        assert not any([inp.owner and
                        isinstance(inp.owner.op,
                                   (tensor.IncSubtensor,
                                    tensor.AdvancedIncSubtensor,
                                    tensor.AdvancedIncSubtensor1))
                        for inp in a.inputs])

    # Now test that the stack trace is copied over properly,
    # if we return the gradients. We need to use the same mode as before.
    f = theano.function([i, j, t], dW, mode=mode)
    assert check_stack_trace(f, ops_to_check=[
        tensor.IncSubtensor, tensor.AdvancedIncSubtensor,
        tensor.AdvancedIncSubtensor1])
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_set_to_inc_subtensor():
    v = theano.tensor.fmatrix()
    s = v[[2, 1]]
    g = s + 3
    r = theano.tensor.set_subtensor(s, g)
    moder = compile.get_default_mode().excluding('local_set_to_inc_subtensor')
    modet = compile.get_default_mode().including('local_set_to_inc_subtensor')
    f1 = theano.function([v], r, mode=moder)
    f2 = theano.function([v], r, mode=modet)

    advi1 = [n for n in f1.maker.fgraph.toposort()
             if isinstance(n.op, tensor.AdvancedIncSubtensor1)]

    advi2 = [n for n in f2.maker.fgraph.toposort()
             if isinstance(n.op, tensor.AdvancedIncSubtensor1)]

    # We only have SetSubtensor in f1
    assert all(n.op.set_instead_of_inc for n in advi1)
    # We don't have any SetSubtensor in f2
    assert all(not n.op.set_instead_of_inc for n in advi2)

    val = numpy.random.randn(3, 2).astype('float32')

    r1 = f1(val)
    r2 = f2(val)

    utt.assert_allclose(r1, r2)

    # Finally, test that the stack trace is copied over properly,
    # before and after optimization.
    assert check_stack_trace(f1, ops_to_check=tensor.AdvancedIncSubtensor1)
    assert check_stack_trace(f2, ops_to_check='all')
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_remove_all_assert1(self):
        # remove Assert conditions whose truth value is unknown
        mode = theano.config.mode
        if mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        mode = compile.mode.get_mode(mode).including('local_remove_all_assert')

        x = T.scalar()
        y = T.scalar()
        f = theano.function([x, y], theano.tensor.opt.assert_op(x, y),
                            mode=mode)
        if isinstance(mode, theano.compile.debugmode.DebugMode):
            # DebugMode will run the original version with the Assert
            self.assertRaises(AssertionError, f, 1, 0)
        else:
            f(1, 0)  # The opt removed the Assert; without it, this call would fail.
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1, topo
        assert topo[0].op == deep_copy_op, topo

        mode = compile.mode.get_default_mode()
        a = theano.tensor.opt.assert_op(x, T.eq(x, 0).any())
        f = theano.function([x], a, mode=mode.excluding('unsafe'))
        topo = f.maker.fgraph.toposort()
        a_op = [n for n in topo if isinstance(n.op, T.opt.Assert)]
        assert len(a_op) == 1
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_reduce_broadcast_all_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x)], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_reduce_broadcast_some_1(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x, axis=[0, 2])], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning    Author: GeekLiB
def test_0(self):
        mode = theano.compile.get_default_mode().including(
            'local_useless_reshape')
        i = T.iscalar('i')
        m = theano.tensor.mgrid[0:i,]
        f = theano.function([i], m, mode=mode)
        topo = f.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
Project: Theano-Deep-learning    Author: GeekLiB
def test_1(self):
        x = theano.tensor.matrix('x')
        r = x.reshape(x.shape)

        m0 = theano.compile.get_default_mode()
        m1 = m0.including('local_useless_reshape')
        f1 = theano.function([x], r, mode=m1)
        topo = f1.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)

        m2 = m1.excluding('ShapeOpt')
        f2 = theano.function([x], r, mode=m2)
        topo = f2.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
Project: Theano-Deep-learning    Author: GeekLiB
def test_2(self):
        x = theano.tensor.matrix('x')
        r = x.reshape([Shape_i(i)(x) for i in xrange(x.ndim)])

        m0 = theano.compile.get_default_mode()
        m1 = m0.including('local_useless_reshape')
        f1 = theano.function([x], r, mode=m1)
        topo = f1.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)

        m2 = m1.excluding('ShapeOpt')
        f2 = theano.function([x], r, mode=m2)
        topo = f2.maker.fgraph.toposort()
        assert not any(isinstance(n.op, tensor.basic.Reshape) for n in topo)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_expm1():
    x = matrix('x')
    u = T.scalar('u')

    y = T.exp(x) - 1.
    z = T.exp(x) - 2.
    t = T.exp(x) - x
    s = T.exp(u) - numpy.ones((4, 3)).astype(config.floatX)
    MODE = theano.compile.get_default_mode().including('local_expm1')
    f = function([x], y, mode=MODE)
    g = function([x], z, mode=MODE)
    h = function([x], t, mode=MODE)
    r = function([u], s, mode=MODE)
    x_val = numpy.random.rand(4, 3).astype(config.floatX)
    f_val = f(x_val)
    f_test = function([x], T.expm1(x), mode=MODE)

    utt.assert_allclose(f_val, f_test(x_val))

    assert any(isinstance(n.op, T.Elemwise) and isinstance(n.op.scalar_op, theano.scalar.basic.Expm1)
               for n in f.maker.fgraph.toposort())

    assert not any(isinstance(n.op, T.Elemwise) and isinstance(n.op.scalar_op, theano.scalar.basic.Expm1)
                   for n in g.maker.fgraph.toposort())

    assert not any(isinstance(n.op, T.Elemwise) and isinstance(n.op.scalar_op, theano.scalar.basic.Expm1)
                   for n in h.maker.fgraph.toposort())

    assert not any(isinstance(n.op, T.Elemwise) and isinstance(n.op.scalar_op, theano.scalar.basic.Expm1)
                   for n in r.maker.fgraph.toposort())
Project: statestream    Author: VolkerFischer
def any(x, axis=None, keepdims=False):
    # Logical OR reduction over `axis`; thin wrapper around T.any.
    return T.any(x, axis=axis, keepdims=keepdims)
Project: deep-coref    Author: clarkkev
def get_output_mask(self, train=False):
        X = self.get_input(train)
        return T.any(T.ones_like(X) * (1. - T.eq(X, self.mask_value)), axis=-1)
Project: deep-coref    Author: clarkkev
def get_output(self, train=False):
        X = self.get_input(train)
        return X * T.shape_padright(T.any((1. - T.eq(X, self.mask_value)), axis=-1))
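
A concrete check of these two masking expressions (the toy batch is ours; mask_value is assumed to be 0): a timestep whose features all equal mask_value gets mask 0 and is zeroed out.

import numpy as np
import theano
import theano.tensor as T

mask_value = 0.
X = T.tensor3('X')  # (batch, time, features)
mask = T.any(T.ones_like(X) * (1. - T.eq(X, mask_value)), axis=-1)
masked = X * T.shape_padright(mask)
f = theano.function([X], [mask, masked])

x = np.array([[[0., 0.], [1., 0.]]], dtype=theano.config.floatX)
m, y = f(x)
print(m)  # -> [[0 1]]: timestep 0 is entirely mask_value, timestep 1 is not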
Project: MACA    Author: ppartha03
def grad(self, args, g_outs):
        def pgrad(g_out):
            g_out = T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound)
            g_out = ifelse(T.any(T.isnan(g_out)), T.ones_like(g_out)*0.00001, g_out)
            return g_out
        return [pgrad(g_out) for g_out in g_outs]
Project: MACA    Author: ppartha03
def grad(self, args, g_outs):
        def pgrad(g_out):
            g_out = T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound)
            g_out = ifelse(T.any(T.isnan(g_out)), T.ones_like(g_out)*0.00001, g_out)
            return g_out
        return [pgrad(g_out) for g_out in g_outs]
Project: MACA    Author: ppartha03
def grad(self, args, g_outs):
        def pgrad(g_out):
            g_out = T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound)
            g_out = ifelse(T.any(T.isnan(g_out)), T.ones_like(g_out)*0.00001, g_out)
            return g_out
        return [pgrad(g_out) for g_out in g_outs]
Project: MACA    Author: ppartha03
def grad(self, args, g_outs):
        def pgrad(g_out):
            g_out = T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound)
            g_out = ifelse(T.any(T.isnan(g_out)), T.ones_like(g_out)*0.00001, g_out)
            return g_out
        return [pgrad(g_out) for g_out in g_outs]
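
The pattern above (clip the incoming gradient, then replace it wholesale with a small constant if any entry is NaN) can be checked in isolation; the clip bounds and values in this sketch are ours:

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

g = T.vector('g')
clipped = T.clip(g, -1., 1.)  # illustrative clip bounds
safe = ifelse(T.any(T.isnan(clipped)),
              T.ones_like(clipped) * 0.00001,  # fallback when any entry is NaN
              clipped)
f = theano.function([g], safe)

print(f(np.array([0.5, np.nan], dtype=theano.config.floatX)))  # -> both entries become 1e-05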
Project: InnerOuterRNN    Author: Chemoinformatics
def any(x, axis=None, keepdims=False):
    '''Logical OR reduction.
    '''
    return T.any(x, axis=axis, keepdims=keepdims)
Project: odin_old    Author: trungnt13
def any(x, axis=None, keepdims=False):
    '''Logical OR reduction.
    '''
    return T.any(x, axis=axis, keepdims=keepdims)
Project: nature_methods_multicut_pipeline    Author: ilastik
def getupdates(self, cost=None, gradients=None, method='sgd', **kwargs):
        """
        :type cost: theano.tensor.var.TensorVariable
        :param cost: Cost scalar

        :type gradients: list
        :param gradients: List of gradients w.r.t. the corresponding element in the list of parameters

        :type method: str or callable
        :param method: Method for weight update. If callable, should take (params, cost, gradient), in that order.

        :type kwargs: dict
        :param kwargs: Extra arguments for method (if any)
        """

        # Parse cost and gradient
        if cost is None:
            cost = self.C

        if gradients is None:
            gradients = self.dC

        # Make sure there are no ghost variables lurking in the parameter list
        assert not any([isinstance(param, netutils.ghostvar) for param in self.params]), \
            "Uninstantiated ghost variables found in the parameter list. Run feedforward() or cost() method first."

        if method in ['sgd', 'stochastic gradient descent']:
            self.updates = nt.sgd(self.params, cost=cost, gradients=gradients,
                                  learningrate=kwargs.get("learningrate"))
        else:
            # This allows method to be a function name string from the netrain py file.
            try:
                if isinstance(method, str):
                    self.updates = netutils.smartfunc(getattr(nt, method))(params=self.params, cost=cost,
                                                                           gradients=gradients, **kwargs)
                elif callable(method):
                    self.updates = netutils.smartfunc(method)(self.params, cost=cost, gradients=gradients, **kwargs)
                else:
                    raise NotImplementedError("Update method evaluation failed.")

            except AttributeError:
                raise NotImplementedError("Update method {} not implemented.".format(method))

        # Append update requests
        self.updates += self.updaterequests

        # Return
        return self.updates

    # Method to compile model functions
Project: nature_methods_multicut_pipeline    Author: ilastik
def feedforward(self, inp=None):
        # Parse input
        if inp is not None:
            # Inp is given. Set the x of the first layer in modern
            self.x = inp
            self.trainyard[0].x = inp
        else:
            # Assume model input has been set. Fetch from the list of inputs the correct number of inputs for every
            # coach and set as input
            inplist = pyk.obj2list(self.x)
            inplistcursor = 0
            for coach in pyk.obj2list(self.trainyard[0]):
                # Fetch from the list of inputs
                coachinp = inplist[inplistcursor:inplistcursor+coach.numinp]
                # Increment cursor
                inplistcursor += coach.numinp
                # Delist and set input
                coach.x = pyk.delist(coachinp)

            inp = self.x

        # Complain if trainyard empty
        assert len(self.trainyard) != 0, "Cannot feedforward, trainyard empty."

        # Instantiate
        self.instantiate()

        # Feedforward recursively. Don't take the train-coach analogy too seriously here.
        cache = inp
        for train in self.trainyard:
            if isinstance(train, list):
                # Fetch from the list of inputs the correct number of inputs for every coach and set as input
                inplist = pyk.obj2list(cache)
                inplistcursor = 0
                coachcache = []
                for coach in train:
                    # Fetch coach input
                    coachinp = pyk.delist(inplist[inplistcursor:inplistcursor+coach.numinp])
                    inplistcursor += coach.numinp
                    coachout = coach.feedforward(inp=coachinp)
                    # Store all coach outputs in another cache
                    coachcache.append(coachout)
                cache = coachcache
            else:
                cache = train.feedforward(inp=cache)
            # Flatten any recursive outputs to a linear list
            cache = pyk.delist(list(pyk.flatten(cache)))

        # There might be new update requests to fetch
        self.rebuildupdaterequestlist()

        # Return
        self.y = cache
        return self.y
Project: nature_methods_multicut_pipeline    Author: ilastik
def getupdates(self, cost=None, gradients=None, method='sgd', **kwargs):
        """
        :type cost: theano.tensor.var.TensorVariable
        :param cost: Cost scalar

        :type gradients: list
        :param gradients: List of gradients w.r.t. the corresponding element in the list of parameters

        :type method: str or callable
        :param method: Method for weight update. If callable, should take (params, cost, gradient), in that order.

        :type kwargs: dict
        :param kwargs: Extra arguments for method (if any)
        """

        # Parse cost and gradient
        if cost is None:
            cost = self.C

        if gradients is None:
            gradients = self.dC

        # Make sure there are no ghost variables lurking in the parameter list
        assert not any([isinstance(param, netutils.ghostvar) for param in self.params]), \
            "Uninstantiated ghost variables found in the parameter list. Run feedforward() or cost() method first."

        if method in ['sgd', 'stochastic gradient descent']:
            self.updates = nt.sgd(self.params, cost=cost, gradients=gradients,
                                  learningrate=kwargs.get("learningrate"))
        else:
            # This allows method to be a function name string from the netrain py file.
            try:
                if isinstance(method, str):
                    self.updates = netutils.smartfunc(getattr(nt, method))(params=self.params, cost=cost,
                                                                           gradients=gradients, **kwargs)
                elif callable(method):
                    self.updates = netutils.smartfunc(method)(self.params, cost=cost, gradients=gradients, **kwargs)
                else:
                    raise NotImplementedError("Update method evaluation failed.")

            except AttributeError:
                raise NotImplementedError("Update method {} not implemented.".format(method))

        # Append update requests
        self.updates += self.updaterequests

        # Return
        return self.updates

    # Method to compile model functions
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_useless_slice():
    # test a simple matrix
    x = tensor.matrix('x')
    mode_unopt = compile.get_default_mode().excluding("local_useless_slice", "local_mul_canonizer")
    mode_opt = compile.get_default_mode().including("local_useless_slice").excluding("local_mul_canonizer")

    # test with and without the useless slice
    o = 2 * x[0, :]
    f_unopt = theano.function([x], o, mode=mode_unopt)
    f_opt = theano.function([x], o, mode=mode_opt)
    test_inp = numpy.random.randint(-10, 10, (4, 4)).astype('float32')
    assert all(f_opt(test_inp) == f_unopt(test_inp)), \
           "The optimization caused a mismatch in the result"
    # test to see if the slice is truly gone
    apply_node = f_opt.maker.fgraph.toposort()[0]
    subtens = apply_node.op
    assert not any(isinstance(idx, slice) for idx in subtens.idx_list), "Slice should be gone"

    # Now test that the stack trace is copied over properly,
    # before and after optimization.
    assert check_stack_trace(f_unopt, ops_to_check='all')
    assert check_stack_trace(f_opt, ops_to_check='all')

    # test a 4d tensor
    z = tensor.tensor4('z')
    o2 = z[1, :, :, 1]
    o3 = z[0, :, :, :]
    f_opt_check = theano.function([z], o2, mode=mode_opt)
    f_opt_check_apply = theano.function([z], o3, mode=mode_opt)

    # The optimization shouldn't apply here
    apply_node = f_opt_check.maker.fgraph.toposort()[0]
    subtens = apply_node.op
    assert [isinstance(idx, slice) for idx in subtens.idx_list].count(True) == 2
    # But it should here
    apply_node = f_opt_check_apply.maker.fgraph.toposort()[0]
    subtens = apply_node.op
    assert not any(isinstance(idx, slice) for idx in subtens.idx_list)

    # Finally, test that the stack trace is copied over properly,
    # before and after optimization.
    assert check_stack_trace(f_opt_check, ops_to_check=Subtensor)
    assert check_stack_trace(f_opt_check_apply, ops_to_check=Subtensor)
Project: Theano-Deep-learning    Author: GeekLiB
def test_scalar6(self):
        # General case with one slice and one index
        # var[b:e:s][i]
        x = tensor.matrix('x')
        b = tensor.iscalar('b')
        e = tensor.iscalar('e')
        s = tensor.iscalar('s')
        i = tensor.iscalar('i')
        f = function([x, b, e, s, i], x[b:e:s][i], mode=mode_opt)

        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(f, ops_to_check=Subtensor))

        #theano.printing.debugprint(f, print_type=True)

        topo = f.maker.fgraph.toposort()
        # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
        assert len([t for t in topo
                    if isinstance(t.op, tensor.Subtensor)]) == 1
        # print topo[-1].op
        assert isinstance(topo[-1].op, DeepCopyOp)

        b_r = self.rng.permutation(list(range(-4, 4)))[:3]
        e_r = self.rng.permutation(list(range(-4, 4)))[:3]
        i_r = self.rng.permutation(list(range(-4, 4)))[:3]

        s_r = self.rng.permutation([-3, -2, -1, 1, 2, 3])[:3]

        for x_s in self.x_shapes:
            n_index_err = 0
            n_ok = 0
            x_val = self.rng.uniform(size=x_s).astype(config.floatX)
            for b_v in b_r:
                for e_v in e_r:
                    for s_v in s_r:
                        for i_v in i_r:
                            # The index could be out of bounds
                            # In that case, an Exception should be raised,
                            # otherwise, we let DebugMode check f
                            try:
                                x_val[b_v:e_v:s_v][i_v]
                            except IndexError:
                                n_index_err += 1
                                self.assertRaises(IndexError,
                                                  f, x_val, b_v, e_v, s_v, i_v)
                            else:
                                # Executed if the "try" clause did not raise
                                # any exception
                                n_ok += 1
                                f(x_val, b_v, e_v, s_v, i_v)

            # print 'shape: %s' % (x_s,)
            # print '%% OK: %f' % (float(n_ok) * 100 / (n_ok + n_index_err))