Python theano.tensor module: all() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.all().

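Before the project examples, here is a minimal sketch of what theano.tensor.all() computes: like numpy.all(), it reduces a tensor with a logical AND, either over every element or along a given axis. The variable names below are illustrative only.

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    row_all = T.all(x, axis=1)   # logical AND over each row
    full_all = T.all(x)          # logical AND over every element

    f = theano.function([x], [row_all, full_all])
    data = np.array([[1., 2.], [0., 3.]], dtype=theano.config.floatX)
    rows, whole = f(data)        # rows == [1, 0], whole == 0 (int8)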
Project: lstmprovisor-python    Author: Impro-Visor
def note_to_encoding(self, chosen_note, relative_position, low_bound, high_bound):
        """
        Convert a chosen note back into an encoded form

        Parameters:
            chosen_note: A theano tensor of shape (...) giving an index into encoded_probs
            relative_position: A theano tensor of shape (...) giving the current relative position

        Returns:
            sampled_output: A theano tensor (float32) of shape (..., ENCODING_WIDTH) that is
                sampled from encoded_probs. Should have the same representation as encoded_form from
                encode_melody
        """
        new_idx = T.switch(chosen_note<2, chosen_note, chosen_note+low_bound-relative_position+self.WINDOW_RADIUS)
        new_idx = T.opt.Assert("new_idx should be less than {}".format(self.ENCODING_WIDTH))(new_idx, T.all(new_idx < self.ENCODING_WIDTH))
        sampled_output = T.extra_ops.to_one_hot(new_idx, self.ENCODING_WIDTH)
        return sampled_output
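The snippet above guards new_idx with an Assert op before the one-hot conversion. As a standalone, hypothetical sketch of that pattern: theano.tensor.opt.Assert passes its first argument through unchanged and raises at run time if any attached condition is false.

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.tensor.opt import Assert

    x = T.vector('x')
    # The condition is a T.all() reduction over an elementwise comparison.
    checked = Assert('all elements must be < 5')(x, T.all(x < 5))
    f = theano.function([x], checked)

    f(np.array([1., 2.], dtype=theano.config.floatX))  # passes through unchanged
    # f(np.array([1., 7.], dtype=theano.config.floatX)) would raise AssertionError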
Project: nature_methods_multicut_pipeline    Author: ilastik
def applyparams(self, params=None, cparams=None):
        """This method applies numerical (or theano shared) parameters to the layer."""
        # Generic method for applying parameters
        if params is not None:
            # Convert to numeric (in case params is symbolic)
            params = netutils.sym2num(params)
            # Loop over all params, and set values
            for param, value in zip(self.params, params):
                param.set_value(value)

        if cparams is not None:
            # Convert to numeric
            cparams = netutils.sym2num(cparams)
            # Loop over all cparams and set values
            for cparam, value in zip(self.cparams, cparams):
                cparam.set_value(value)

    # Method to activate encoder or decoder
Project: nature_methods_multicut_pipeline    Author: ilastik
def __add__(self, other):
        """Stack layers to build a network."""
        # Make sure the number of inputs/outputs check out
        assert self.numout == other.numinp, "Cannot chain a component with {} output(s) " \
                                            "with one with {} input(s)".format(self.numout, other.numinp)

        if isinstance(other, layertrain):
            # Make a layertrain only if chain is linear (i.e. no branches)
            # other.numout == 1 when other is a layertrain
            if self.numinp > 1:
                return layertrainyard([self, other])
            else:
                return layertrain([self] + other.train)
        elif isinstance(other, layer):
            # Make a layertrain only if chain is linear (i.e. no branches)
            if all([num == 1 for num in [self.numinp, self.numout, other.numinp, other.numout]]):
                return layertrain([self] + [other])
            else:
                return layertrainyard([self, other])
        elif isinstance(other, layertrainyard):
            return layertrainyard([self] + other.trainyard)
        else:
            raise TypeError('Unrecognized layer class.')
Project: Theano-Deep-learning    Author: GeekLiB
def test_c(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")

        for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.add, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.mul, dtype=dtype)
        for dtype in ["floatX", "int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.minimum, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.maximum, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype,
                             tensor_op=tensor.all)
            self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype,
                             tensor_op=tensor.any)
        for dtype in ["int8", "uint8"]:
            self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype)
            self.with_linker(gof.CLinker(), scalar.xor, dtype=dtype)
Project: Theano-Deep-learning    Author: GeekLiB
def test_recursive_lift(self):
        v = T.vector(dtype="float64")
        m = T.matrix(dtype="float64")
        out = ((v + 42) * (m + 84)).T
        g = FunctionGraph([v, m], [out])
        init_str_g = ("[InplaceDimShuffle{1,0}(Elemwise{mul,no_inplace}"
                      "(InplaceDimShuffle{x,0}(Elemwise{add,no_inplace}"
                      "(<TensorType(float64, vector)>, "
                      "InplaceDimShuffle{x}(TensorConstant{42}))), "
                      "Elemwise{add,no_inplace}"
                      "(<TensorType(float64, matrix)>, "
                      "InplaceDimShuffle{x,x}(TensorConstant{84}))))]")
        self.assertTrue(str(g) == init_str_g)
        new_out = local_dimshuffle_lift.transform(g.outputs[0].owner)[0]
        new_g = FunctionGraph(g.inputs, [new_out])
        opt_str_g = ("[Elemwise{mul,no_inplace}(Elemwise{add,no_inplace}"
                     "(InplaceDimShuffle{0,x}(<TensorType(float64, vector)>), "
                     "InplaceDimShuffle{x,x}(TensorConstant{42})), "
                     "Elemwise{add,no_inplace}(InplaceDimShuffle{1,0}"
                     "(<TensorType(float64, matrix)>), "
                     "InplaceDimShuffle{x,x}(TensorConstant{84})))]")
        self.assertTrue(str(new_g) == opt_str_g)
        # Check stacktrace was copied over correctly after opt was applied
        self.assertTrue(check_stack_trace(new_g, ops_to_check='all'))
Project: Theano-Deep-learning    Author: GeekLiB
def test_kording_bug(self):
        x, y = vectors('xy')
        eps = scalar('eps')
        s = scalar('s')

        #r = theano.tensor.mul(theano.tensor.fill(x, 2.*a), x/a , (y+z) , a)
        #r = theano.tensor.mul((x/a+y) , a, z)
        r = tensor.mul(s - 1,
                       eps + x / s,
                       eps + y / s,
                       s)

        f = function([s, eps, x, y], r ** 2)

        s_val = numpy.asarray(4, dtype=config.floatX)
        eps_val = numpy.asarray(1.e-6, dtype=config.floatX)
        x_val = numpy.asarray([1.5, 2], dtype=config.floatX)
        y_val = numpy.asarray([2.3, 3.1], dtype=config.floatX)

        r0 = f(s_val, eps_val, x_val, y_val)
        r1 = f(s_val, eps_val, x_val, y_val)
        r2 = f(s_val, eps_val, x_val, y_val)

        assert numpy.all(r0 == r1)
        assert numpy.all(r0 == r2)
Project: Theano-Deep-learning    Author: GeekLiB
def test_nested_gpu(self):
        import theano.sandbox.cuda as cuda
        if not cuda.cuda_available:
            raise SkipTest("cuda not available")

        import theano.sandbox.cuda.opt

        y = self.times_2(self.x)
        z = self.times_3(y)
        f = theano.function([self.x], cuda.gpu_from_host(z),
                mode=theano.compile.mode.get_default_mode().including('gpu'))
        topo = f.maker.fgraph.toposort()
        if config.mode != "FAST_COMPILE":
            assert len(topo) == 2
            assert topo[1].op == cuda.gpu_from_host
        # topo1 is doing the composite work on the CPU. Auto-generation of
        # GPU code for ops with support code is not possible.
        fval = numpy.asarray(f([1, 2, 3]))
        assert numpy.all(fval == [6, 12, 18]), fval
Project: Theano-Deep-learning    Author: GeekLiB
def test4(self):
        # Basic test that the optimization does not apply when broadcasting
        # is involved. It *could* be extended to handle that case, but right
        # now it doesn't, so it shouldn't try.
        x = tensor.matrix('x')
        y = tensor.vector('y')
        f = function([x, y], tensor.exp(x + y)[0], mode=mode_opt)

        # Opt doesn't apply, so no need for check_stack_trace
        # self.assertTrue(check_stack_trace(f, ops_to_check='all'))

        prog = f.maker.fgraph.toposort()
        assert isinstance(prog[0].op, tensor.DimShuffle)
        assert prog[1].op == tensor.add
        assert isinstance(prog[2].op, tensor.Subtensor)  # first subtensor
        assert prog[3].op == inplace.exp_inplace
        assert len(prog) == 4
        f([[0, 1], [2, 3]], [4, 5])  # let debugmode test something
Project: Theano-Deep-learning    Author: GeekLiB
def test_const4(self):
        # var[const1::][:const2]
        x = tensor.matrix('x')
        for idx1 in xrange(-7, 7):
            for idx2 in xrange(-7, 7):
                f = function([x], x[idx1:][:idx2], mode=mode_opt)

                # Check stacktrace was copied over correctly after opt was applied
                self.assertTrue(check_stack_trace(f, ops_to_check='all'))

                #theano.printing.debugprint(f, print_type=True)
                topo = f.maker.fgraph.toposort()
                # print [t for t in topo if isinstance(t.op, tensor.Subtensor)]
                assert len([t for t in topo
                            if isinstance(t.op, tensor.Subtensor)]) == 1
                # print topo[-1].op
                assert isinstance(topo[-1].op, DeepCopyOp)

                for x_s in self.x_shapes:
                    x_val = self.rng.uniform(size=x_s).astype(config.floatX)
                    f(x_val)  # let debugmode test something
Project: Theano-Deep-learning    Author: GeekLiB
def test_eq(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x, y], T.eq(x, y), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx, vy)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.Elemwise)
        assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)
        f2 = theano.function([x], T.eq(x, x), mode=self.mode)
        assert numpy.all(f2(vx) == numpy.ones((5, 4)))
        topo2 = f2.maker.fgraph.toposort()
        # Shape_i{1}(<TensorType(float64, matrix)>), Shape_i{0}(<TensorType(float64, matrix)>), Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0
        assert len(topo2) == 3
        assert isinstance(topo2[-1].op, T.Alloc)
Project: Theano-Deep-learning    Author: GeekLiB
def test_neq(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x, y], T.neq(x, y), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx, vy)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, T.Elemwise)
        assert isinstance(topo[0].op.scalar_op, theano.scalar.NEQ)
        f2 = theano.function([x], T.neq(x, x), mode=self.mode)
        assert numpy.all(f2(vx) == numpy.zeros((5, 4)))
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 3
        assert isinstance(topo2[-1].op, T.Alloc)
Project: Theano-Deep-learning    Author: GeekLiB
def test_mul(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x], T.mul(x), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == deep_copy_op
        f2 = theano.function([x, y], T.mul(x, y), mode=self.mode)
        assert numpy.all(f2(vx, vy) == vx * vy)
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 1
        assert isinstance(topo2[0].op, T.Elemwise)
        assert isinstance(topo2[0].op.scalar_op, theano.scalar.Mul)
Project: Theano-Deep-learning    Author: GeekLiB
def test_add(self):
        x = T.dmatrix()
        y = T.dmatrix()
        f = theano.function([x], T.add(x), mode=self.mode)
        vx = numpy.random.rand(5, 4)
        vy = numpy.random.rand(5, 4)
        f(vx)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert topo[0].op == deep_copy_op
        f2 = theano.function([x, y], T.add(x, y), mode=self.mode)
        assert numpy.all(f2(vx, vy) == vx + vy)
        topo2 = f2.maker.fgraph.toposort()
        assert len(topo2) == 1
        assert isinstance(topo2[0].op, T.Elemwise)
        assert isinstance(topo2[0].op.scalar_op, theano.scalar.Add)
Project: Theano-Deep-learning    Author: GeekLiB
def test_constant_folding():
    """ Test that constant folding get registered at fast_compile

    An error removed that registration during the registration.
    """
    x = tensor.dvector()
    mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2

    # Test that we do not crash when constant folding elemwise scalar
    # as they should not generate c code.

    x = tensor.constant(3)
    assert x.ndim == 0
    mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
    f = theano.function([], [x * 2, x + x], mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert all([isinstance(n.op, DeepCopyOp) for n in topo])
Project: Theano-Deep-learning    Author: GeekLiB
def test_broadcast2(self):
        # test switch(cst, vector, matrix)

        # This case is not optimized for now.
        x = theano.tensor.vector('x', dtype='int32')
        y = theano.tensor.matrix('y', dtype='int64')
        z = theano.tensor.switch(1, x, y)
        f = theano.function([x, y], z, mode=self.mode)
        assert len([node.op for node in f.maker.fgraph.toposort() if
                    isinstance(node.op, theano.tensor.Elemwise) and
                    not isinstance(node.op.scalar_op, theano.scalar.basic.Cast)]) == 0
        vx = numpy.array([4, 5, 6], dtype='int32')
        vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')
        assert numpy.all(f(vx, vy) == vx)

        z = theano.tensor.switch(0, x, y)
        f = theano.function([x, y], z, mode=self.mode)
        assert len([node.op for node in f.maker.fgraph.toposort() if
                    isinstance(node.op, theano.tensor.Elemwise)]) == 0
        vx = numpy.array([4, 5, 6], dtype='int32')
        vy = numpy.array([[7, 8, 9], [10, 11, 12]], dtype='int64')
        assert numpy.all(f(vx, vy) == vy)
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_reduce_broadcast_some_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, False, True))()
            f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)

            order = f.maker.fgraph.toposort()
            assert 1 == sum([isinstance(node.op, T.CAReduce)
                             for node in order])

            node = [node for node in order if isinstance(node.op,
                                                         tensor.CAReduce)][0]

            op = node.op
            assert isinstance(op, T.CAReduce)
            # -- the leading broadcastable dimension has been dropped
            #   by the local_reduce_broadcastable optimization
            #   now summation is over the original x's dimension 1.
            assert node.inputs[0].ndim == 2, node
            assert op.axis == (0,), op.axis
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_join_make_vector():
    a, b, c, d, e = tensor.scalars('abcde')
    v = tensor.vector('v')
    mv = MakeVector(config.floatX)
    s = tensor.join(0, mv(a), v, mv(b, c), mv(d, e))
    f = function([a, b, c, d, e, v], s, mode=mode_opt)
    theano.printing.debugprint(f)
    val = f(1, 2, 3, 4, 6, [7, 8])
    assert numpy.all(val == [1, 7, 8, 2, 3, 4, 6])
    e = f.maker.fgraph.toposort()
    assert len([n for n in e if isinstance(n.op, Join)]) == 1
    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
                for n in e if isinstance(n.op, Join)])
    assert f.maker.fgraph.outputs[0].dtype == config.floatX

    assert check_stack_trace(f, ops_to_check='all')
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_useless_split():
    x = tensor.matrix('x')
    splits = tensor.ivector('splits')
    opt = tensor.split(x, splits, n_splits=1)
    nonopt = tensor.split(x, splits, n_splits=3)

    mode = compile.get_default_mode().including("local_useless_split")
    f_opt = theano.function([x, splits], opt, mode=mode)
    f_nonopt = theano.function([x, splits], nonopt, mode=mode)

    f_opt(numpy.random.rand(4, 4).astype(config.floatX), [4])
    f_nonopt(numpy.random.rand(4, 4).astype(config.floatX), [1, 2, 1])
    graph_opt = f_opt.maker.fgraph.toposort()
    graph_nonopt = f_nonopt.maker.fgraph.toposort()

    assert isinstance(graph_opt[-1].op, DeepCopyOp)
    assert len(graph_nonopt) == 1
    assert isinstance(graph_nonopt[0].op, tensor.Split)

    assert check_stack_trace(f_opt, ops_to_check=[Assert])
    assert check_stack_trace(f_nonopt, ops_to_check='all')
Project: keras    Author: GeekLiB
def zeros(shape, dtype=_FLOATX, name=None):
    '''Instantiates an all-zeros variable.
    '''
    return variable(np.zeros(shape), dtype, name)
Project: keras    Author: GeekLiB
def ones(shape, dtype=_FLOATX, name=None):
    '''Instantiates an all-ones variable.
    '''
    return variable(np.ones(shape), dtype, name)
Project: keras    Author: GeekLiB
def all(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical AND).
    '''
    return T.all(x, axis=axis, keepdims=keepdims)
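A hypothetical usage sketch for the backend wrapper above, assuming the usual keras.backend entry point with Theano selected as the backend:

    import numpy as np
    import keras.backend as K

    v = K.variable(np.array([[1., 1.], [1., 0.]]))
    print(K.eval(K.all(v, axis=1)))  # -> [1 0], the per-row logical AND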
Project: deep-learning-keras-projects    Author: jasmeetsb
def zeros(shape, dtype=None, name=None):
    """Instantiates an all-zeros variable.
    """
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)
Project: deep-learning-keras-projects    Author: jasmeetsb
def ones(shape, dtype=None, name=None):
    """Instantiates an all-ones variable.
    """
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name)
Project: deep-learning-keras-projects    Author: jasmeetsb
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).
    """
    return T.all(x, axis=axis, keepdims=keepdims)
Project: keras-customized    Author: ambrite
def zeros(shape, dtype=None, name=None):
    '''Instantiates an all-zeros variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)
Project: keras-customized    Author: ambrite
def ones(shape, dtype=None, name=None):
    '''Instantiates an all-ones variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name)
Project: keras-customized    Author: ambrite
def all(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical AND).
    '''
    return T.all(x, axis=axis, keepdims=keepdims)
Project: keras    Author: NVIDIA
def zeros(shape, dtype=None, name=None):
    """Instantiates an all-zeros variable.
    """
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)
Project: keras    Author: NVIDIA
def ones(shape, dtype=None, name=None):
    """Instantiates an all-ones variable.
    """
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name)
Project: keras    Author: NVIDIA
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).
    """
    return T.all(x, axis=axis, keepdims=keepdims)
Project: nature_methods_multicut_pipeline    Author: ilastik
def activate(self, what='all'):
        """Use this method to activate the encoder and/or decoder."""
        if what == 'enc' or what == 'all':
            self.encoderactive = True

        if what == 'dec' or what == 'all':
            self.decoderactive = True

    # Method to deactivate encoder or decoder
Project: nature_methods_multicut_pipeline    Author: ilastik
def deactivate(self, what='all'):
        """Use this method to deactivate the encoder and/or decoder."""
        if what == 'enc' or what == 'all':
            self.encoderactive = False

        if what == 'dec' or what == 'all':
            self.decoderactive = False

    # Method for infering output shapes
Project: nature_methods_multicut_pipeline    Author: ilastik
def _bordermode(self):
        # Logic to find what bordermode goes in to the conv interface
        # Find out if padding is compatible with DNN
        dnnpaddable = all([all([dimpad == pad[0] for dimpad in pad]) for pad in self.padding])
        # Compute the dnn pad value
        if dnnpaddable:
            dnnpad = [pad[0] for pad in self.padding]
        else:
            dnnpad = None

        # Whether to trim after conv
        trim = False

        # Get bordermode if padding is [0, 0]
        if dnnpad == [0, 0]:
            if self.convmode == 'same':
                if all([ks % 2 == 1 for ks in self.kersize]):
                    bordermode = 'half'
                else:
                    bordermode = 'full'
                    trim = True
            elif self.convmode == 'valid':
                bordermode = 'valid'
            else:
                bordermode = 'full'
        elif dnnpad is None:
            if self.convmode == 'same':
                bordermode = 'full'
                trim = True
            elif self.convmode == 'valid':
                bordermode = 'valid'
            else:
                bordermode = 'full'
        else:
            bordermode = dnnpad

        return dnnpaddable, bordermode, trim

    # Trims the edges of the convolution result to compensate for zero padding in full convolution.
Project: nature_methods_multicut_pipeline    Author: ilastik
def deactivate(self, what='all'):
        if what == 'enc' or what == 'all':
            self.encoderactive = False

        if what == 'dec' or what == 'all':
            self.decoderactive = False

    # Method to infer output shape
Project: nature_methods_multicut_pipeline    Author: ilastik
def activate(self, what='all'):
        if what == 'enc' or what == 'all':
            self.encoderactive = True

        if what == 'dec' or what == 'all':
            self.decoderactive = True

    # Method to deactivate layer
Project: nature_methods_multicut_pipeline    Author: ilastik
def deactivate(self, what='all'):
        if what == 'enc' or what == 'all':
            self.encoderactive = False

        if what == 'dec' or what == 'all':
            self.decoderactive = False

    # Infer output shape
Project: nature_methods_multicut_pipeline    Author: ilastik
def activate(self, what='all'):
        if what == 'enc' or what == 'all':
            self.encoderactive = True

        if what == 'dec' or what == 'all':
            self.decoderactive = True

    # Method to deactivate layer
Project: nature_methods_multicut_pipeline    Author: ilastik
def deactivate(self, what='all'):
        if what == 'enc' or what == 'all':
            self.encoderactive = False

        if what == 'dec' or what == 'all':
            self.decoderactive = False

    # Infer output shape
Project: nature_methods_multicut_pipeline    Author: ilastik
def deactivate(self, what='all'):
        if what == 'enc' or what == 'all':
            self.encoderactive = False

        if what == 'dec' or what == 'all':
            self.decoderactive = False
Project: keras_superpixel_pooling    Author: parag2489
def zeros(shape, dtype=None, name=None):
    """Instantiates an all-zeros variable.
    """
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)
Project: keras_superpixel_pooling    Author: parag2489
def ones(shape, dtype=None, name=None):
    """Instantiates an all-ones variable.
    """
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name)
Project: keras_superpixel_pooling    Author: parag2489
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).
    """
    return T.all(x, axis=axis, keepdims=keepdims)
Project: keras_superpixel_pooling    Author: parag2489
def _preprocess_conv2d_kernel(kernel, data_format):
    # As of Keras 2.0.0, all kernels are normalized
    # on the format `(rows, cols, input_depth, depth)`,
    # independently of `data_format`.
    # Theano expects `(depth, input_depth, rows, cols)`.
    kernel = kernel.dimshuffle((3, 2, 0, 1))
    return kernel
Project: keras_superpixel_pooling    Author: parag2489
def _preprocess_conv3d_kernel(kernel, data_format):
    # As of Keras 2.0.0, all kernels are normalized
    # on the format `(space, input_depth, depth)`,
    # independently of `data_format`.
    # Theano expects `(depth, input_depth, space)`.
    kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
    return kernel
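As an illustrative aside (the names here are assumptions, not part of the snippet): dimshuffle is what performs the axis reordering in both helpers above; it takes the new axis order, and can also insert broadcastable dimensions with 'x'.

    import theano.tensor as T

    k = T.tensor4('kernel')            # e.g. (rows, cols, input_depth, depth)
    k_th = k.dimshuffle((3, 2, 0, 1))  # -> (depth, input_depth, rows, cols)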
Project: Theano-Deep-learning    Author: GeekLiB
def with_linker(self, linker):
        for xsh, shuffle, zsh in [((2, 3), (1, 'x', 0), (3, 1, 2)),
                                  ((1, 2, 3), (1, 2), (2, 3)),
                                  ((1, 2, 1, 3), (1, 3), (2, 3)),
                                  ((2, 3, 4), (2, 1, 0), (4, 3, 2)),
                                  ((2, 3, 4), ('x', 2, 1, 0, 'x'),
                                   (1, 4, 3, 2, 1)),
                                  ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
                                  ((1, 1, 4), (1, 2), (1, 4)),
                                  ((1, 1, 1), (), ()),
                                  ((1,), ('x', 'x'), (1, 1))]:
            ib = [(entry == 1) for entry in xsh]
            x = self.type(self.dtype, ib)('x')
            e = self.op(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
            assert f(numpy.ones(xsh, dtype=self.dtype)).shape == zsh
            # test that DimShuffle.infer_shape works correctly
            x = self.type(self.dtype, ib)('x')
            e = self.op(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x],
                                                  [e.shape])).make_function()
            assert all(f(numpy.ones(xsh, dtype=self.dtype))) == all(zsh)

        # Test when we drop an axis that is not broadcastable
        ib = [False, True, False]
        x = self.type(self.dtype, ib)('x')
        self.assertRaises(ValueError, self.op, ib, shuffle)

        # Test when we drop an axis that doesn't have shape 1
        ib = [True, True, False]
        x = self.type(self.dtype, ib)('x')
        e = self.op(ib, (1, 2))(x)
        f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
        self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))

        # Test that we can't take a dimension multiple times
        xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
        ib = [False, True, False]
        x = self.type(self.dtype, ib)('x')
        self.assertRaises(ValueError, DimShuffle, ib, shuffle)
Project: Theano-Deep-learning    Author: GeekLiB
def with_linker_inplace(self, linker, op, type, rand_val):
        for xsh, ysh in [((5, 5), (5, 5)),
                         ((5, 5), (1, 5)),
                         ((5, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type(theano.config.floatX,
                     [(entry == 1) for entry in xsh])('x')
            y = type(theano.config.floatX,
                     [(entry == 1) for entry in ysh])('y')
            e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            f(xv, yv)

            self.assertTrue((xv == zv).all())
            # test Elemwise.infer_shape
            # the Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = type(theano.config.floatX,
                         [(entry == 1) for entry in xsh])('x')
                y = type(theano.config.floatX,
                         [(entry == 1) for entry in ysh])('y')
                e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                xv = rand_val(xsh)
                yv = rand_val(ysh)
                zv = xv + yv

                f(xv, yv)

                assert xv.shape == zv.shape
Project: Theano-Deep-learning    Author: GeekLiB
def test_fill(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")
        for linker, op, t, rval in zip(self.linkers, [self.op, self.cop],
                                       [self.type, self.ctype],
                                       [self.rand_val, self.rand_cval]):
            x = t(theano.config.floatX, [0, 0])('x')
            y = t(theano.config.floatX, [1, 1])('y')
            e = op(scalar.Second(scalar.transfer_type(0)), {0: 0})(x, y)
            f = linker().accept(FunctionGraph([x, y], [e])).make_function()
            xv = rval((5, 5))
            yv = rval((1, 1))
            f(xv, yv)
            assert (xv == yv).all()
Project: Theano-Deep-learning    Author: GeekLiB
def test_weird_strides(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")
        for linker, op, t, rval in zip(self.linkers, [self.op, self.cop],
                                       [self.type, self.ctype],
                                       [self.rand_val, self.rand_cval]):
            x = t(theano.config.floatX, [0, 0, 0, 0, 0])('x')
            y = t(theano.config.floatX, [0, 0, 0, 0, 0])('y')
            e = op(scalar.add)(x, y)
            f = linker().accept(FunctionGraph([x, y], [e])).make_function()
            xv = rval((2, 2, 2, 2, 2))
            yv = rval((2, 2, 2, 2, 2)).transpose(4, 0, 3, 1, 2)
            zv = xv + yv
            assert (f(xv, yv) == zv).all()
Project: Theano-Deep-learning    Author: GeekLiB
def test_same_inputs(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")
        for linker, op, t, rval in zip(self.linkers, [self.op, self.cop],
                                       [self.type, self.ctype],
                                       [self.rand_val, self.rand_cval]):
            x = t(theano.config.floatX, [0, 0])('x')
            e = op(scalar.add)(x, x)
            f = linker().accept(FunctionGraph([x], [e])).make_function()
            xv = rval((2, 2))
            zv = xv + xv
            assert (f(xv) == zv).all()
Project: Theano-Deep-learning    Author: GeekLiB
def test_perform_nan(self):
        for dtype in ["floatX", "complex64", "complex128"]:
            self.with_linker(gof.PerformLinker(), scalar.add, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.mul, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.maximum, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.minimum, dtype=dtype,
                             test_nan=True)
            self.with_linker(gof.PerformLinker(), scalar.or_, dtype=dtype,
                             test_nan=True, tensor_op=tensor.any)
            self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype,
                             test_nan=True, tensor_op=tensor.all)