Python theano.tensor module: split() code examples

The following 11 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.split().
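Before the project excerpts, here is a minimal, self-contained sketch of the call itself (the variable names are illustrative and not taken from any project below). theano.tensor.split(x, splits_size, n_splits, axis=0) cuts x into n_splits pieces along axis, with the length of each piece given by splits_size:

import numpy
import theano
import theano.tensor as T

# Split a length-6 vector into pieces of size 3, 2 and 1 along axis 0.
x = T.fvector('x')
a, b, c = T.split(x, splits_size=[3, 2, 1], n_splits=3, axis=0)
f = theano.function([x], [a, b, c])
print(f(numpy.arange(6, dtype='float32')))
# prints three float32 arrays: [0, 1, 2], [3, 4] and [5]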

Project: fxnn    Author: khaotik
def lyr_lstm(
        self, name_,
        s_x_, s_cell_, s_hid_,
        idim_, hdim_,
        axis_=-1,
        lyr_linear_=None,
        op_act_=T.tanh,
        op_gate_=T.nnet.sigmoid):
        s_inp = T.join(axis_, s_x_, s_hid_)
        if lyr_linear_ is None:
            lyr_linear_ = self.lyr_linear
        s_gates_lin, s_inp_lin = T.split(
            lyr_linear_(name_+'_rec', s_inp, idim_+hdim_, hdim_*4),
            [hdim_*3,hdim_], 2, axis=axis_)
        s_igate, s_fgate, s_ogate = T.split(op_gate_(s_gates_lin), [hdim_]*3, 3, axis=axis_)
        s_cell_tp1 = s_igate*op_act_(s_inp_lin) + s_fgate*s_cell_
        s_hid_tp1 = op_act_(s_cell_tp1)*s_ogate
        return s_cell_tp1, s_hid_tp1
Project: dnc-theano    Author: khaotik
def lyr_gru_flat(
        self, name_,
        s_x_, s_state_,
        idim_, hdim_,
        axis_=-1,
        lyr_linear_=None,
        op_act_=T.tanh,
        op_gate_=T.nnet.sigmoid,
        params_group_='params'
        ):
        '''
        GRU layer, flat version

        To use this layer, you must supply the state variable yourself.

        '''
        if lyr_linear_ is None:
            lyr_linear_ = self.lyr_linear
        s_igate = lyr_linear_(name_+'_igate', idim_+hdim_, idim_, params_group_=params_group_)
        s_inp_gated = T.join(axis_, s_x_ * op_gate_(s_igate), s_state_)
        s_gate_lin, s_state_tp1_lin = T.split(lyr_linear_(name_+'_gate', s_inp_gated, idim_+hdim_, hdim_*2), [hdim_,hdim_], 2, axis_)
        s_gate = op_gate_(s_gate_lin)
        return s_state_*s_gate + op_act_(s_state_tp1_lin)*(1.-s_gate)
Project: Neural-Photo-Editor    Author: ajbrock
def get_output_for(self, input, deterministic=False, **kwargs):
        def _phase_shift(input,r):
            bsize, c, a, b = input.shape[0], 1, self.output_shape[2] // r, self.output_shape[3] // r
            X = T.reshape(input, (bsize, r, r, a, b))
            X = T.transpose(X, (0, 3, 4, 1, 2))  # bsize, a, b, r2, r1
            X = T.split(x=X, splits_size=[1]*a, n_splits=a, axis=1)  # a, [bsize, b, r, r]
            X = [T.reshape(x, (bsize, b, r, r)) for x in X]
            X = T.concatenate(X, axis=2)  # bsize, b, a*r, r
            X = T.split(x=X, splits_size=[1]*b, n_splits=b, axis=1)  # b, [bsize, a*r, r]
            X = [T.reshape(x, (bsize, a*r, r)) for x in X]
            X = T.concatenate(X, axis=2)  # bsize, a*r, b*r
            return X.dimshuffle(0, 'x', 1, 2)
        Xc = T.split(x=input, splits_size=[input.shape[1] // self.c] * self.c, n_splits=self.c, axis=1)
        return T.concatenate([_phase_shift(xc, self.r) for xc in Xc], axis=1)

# Multiscale Dilated Convolution Block
# This function (not a layer in and of itself, though you could make it one) returns a set of concatenated conv2d and dilatedconv2d layers.
# Each layer uses the same basic filter W, operating at a different dilation factor (or taken as the mean of W for the 1x1 conv).
# The channel-wise output of each layer is weighted by a set of coefficients, which are initialized to 1 / the total number of dilation scales,
# meaning that we're starting by taking an elementwise mean. These should be learnable parameters.

# NOTES: - I'm considering changing the variable names to be more descriptive, and look less like ridiculous academic code. It's on the to-do list.
#        - I keep the bias and nonlinearity out of the default definition for this layer, as I expect it to be batchnormed and nonlinearized in the model config.
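The block described by those comments is not part of the excerpt above. The following is a rough Theano-only sketch of the idea, for illustration only; it is not the Neural-Photo-Editor implementation. It assumes a Theano version whose conv2d accepts the filter_dilation argument (0.9+) and an odd filter_size, uses one scalar coefficient per scale instead of per-channel coefficients, and combines the weighted branches by summation, which with the 1/N initialization amounts to the elementwise mean mentioned above.

import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

def mdc_block_sketch(s_input, s_W, filter_size, dilations=(2, 4, 8)):
    # s_input: (batch, channels, rows, cols)
    # s_W: shared filter bank of shape (num_filters, channels, filter_size, filter_size)
    n_scales = len(dilations) + 1  # dilated branches plus the 1x1 branch
    init = numpy.float32(1.0 / n_scales)
    coeffs = [theano.shared(init, name='mdc_coeff_%d' % i) for i in range(n_scales)]
    # 1x1 branch: its filter is the spatial mean of W.
    s_W_1x1 = T.mean(s_W, axis=(2, 3), keepdims=True)
    branches = [coeffs[0] * conv2d(s_input, s_W_1x1, border_mode='valid')]
    # Each dilated branch reuses the same filter bank W; the padding is chosen
    # so every branch keeps the input's spatial size (filter_size must be odd).
    for c, d in zip(coeffs[1:], dilations):
        pad = (filter_size // 2) * d
        branches.append(c * conv2d(s_input, s_W, border_mode=(pad, pad),
                                   filter_dilation=(d, d)))
    # The 1/n_scales-weighted sum starts out as an elementwise mean; the
    # coefficients are meant to be learned, and bias/nonlinearity are left out.
    return sum(branches), coeffs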
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_useless_split():
    x = tensor.matrix('x')
    splits = tensor.ivector('splits')
    opt = tensor.split(x, splits, n_splits=1)
    nonopt = tensor.split(x, splits, n_splits=3)

    mode = compile.get_default_mode().including("local_useless_split")
    f_opt = theano.function([x, splits], opt, mode=mode)
    f_nonopt = theano.function([x, splits], nonopt, mode=mode)

    f_opt(numpy.random.rand(4,4).astype(config.floatX), [4])
    f_nonopt(numpy.random.rand(4,4).astype(config.floatX), [1,2,1])
    graph_opt = f_opt.maker.fgraph.toposort()
    graph_nonopt = f_nonopt.maker.fgraph.toposort()

    assert isinstance(graph_opt[-1].op, DeepCopyOp)
    assert len(graph_nonopt)==1
    assert isinstance(graph_nonopt[0].op, tensor.Split)

    assert check_stack_trace(f_opt, ops_to_check=[Assert])
    assert check_stack_trace(f_nonopt, ops_to_check='all')
Project: Theano-Deep-learning    Author: GeekLiB
def infer_shape(self, node, in_shapes):
        shape_a = in_shapes[0]
        n = node.inputs[1]
        axis = node.inputs[2]
        if len(shape_a) == 1:
            return [(n,)]
        elif isinstance(axis, tensor.TensorConstant):
            out_shape = (list(shape_a[0: axis.data.item()]) + [n] +
                         list(shape_a[axis.data + 1:]))
        else:
            l = len(shape_a)
            shape_a = tensor.stack(shape_a)
            out_shape = tensor.concatenate((shape_a[0: axis], [n],
                                            shape_a[axis + 1:]))
            n_splits = [1] * l
            out_shape = tensor.split(out_shape, n_splits, l)
            out_shape = [a[0] for a in out_shape]
        return [out_shape]
Project: fxnn    Author: khaotik
def lyr_gru(
        self, name_,
        s_x_, s_state_,
        idim_, hdim_,
        axis_=0,
        lyr_linear_=None,
        op_act_=T.tanh,
        op_gate_=T.nnet.sigmoid):
        if lyr_linear_ is None:
            lyr_linear_ = self.lyr_linear
        s_igate = lyr_linear_(name_+'_igate', idim_+hdim_, idim_)
        s_inp_gated = T.join(axis_, s_x_ * op_gate_(s_igate), s_state_)
        s_gate_lin, s_state_tp1_lin = T.split(lyr_linear_(name_+'_gate', s_inp_gated, idim_+hdim_, hdim_*2), [hdim_,hdim_], 2, axis_)
        s_gate = op_gate_(s_gate_lin)
        return s_state_*s_gate + op_act_(s_state_tp1_lin)*(1.-s_gate)
Project: dnc-theano    Author: khaotik
def lyr_lstm_flat(
        self, name_,
        s_x_, s_cell_, s_hid_,
        idim_, hdim_,
        axis_=-1,
        lyr_linear_=None,
        op_act_=T.tanh,
        op_gate_=T.nnet.sigmoid,
        params_group_='params'
        ):
        '''
        LSTM layer, flat version

        To use this layer, you must supply the cell and hidden state variables yourself.

        Returns:
            cell_state, hidden_state

        '''
        s_inp = T.join(axis_, s_x_, s_hid_)
        if lyr_linear_ is None:
            lyr_linear_ = self.lyr_linear
        s_gates_lin, s_inp_lin = T.split(
            lyr_linear_(name_+'_rec', s_inp, idim_+hdim_, hdim_*4),
            [hdim_*3,hdim_], 2, axis=axis_)
        s_igate, s_fgate, s_ogate = T.split(op_gate_(s_gates_lin), [hdim_]*3, 3, axis=axis_)
        s_cell_tp1 = s_igate*op_act_(s_inp_lin) + s_fgate*s_cell_
        s_hid_tp1 = op_act_(s_cell_tp1)*s_ogate
        return s_cell_tp1, s_hid_tp1
Project: Theano-Deep-learning    Author: GeekLiB
def assert_func_pair_optimized(self, func1, func2, data,
                                   should_copy=True, is_complex=False):
        """
        Check that a pair of funcs is optimized properly
        """

        x = T.cmatrix() if is_complex else T.fmatrix()
        o = func2(func1(x))
        f = theano.function([x], o, mode=self.mode)
        delta = f(data) - data
        topo = f.maker.fgraph.toposort()

        if should_copy:
            acceptable_topo_lens = [1]
        else:
            # The 2 funcs can be split apart if they are not inverses
            acceptable_topo_lens = [1, 2]

        if should_copy:
            delta_condition = numpy.all(delta == 0)
        else:
            delta_condition = numpy.all(delta != 0)

        self.assertTrue(len(topo) in acceptable_topo_lens)
        self.assertTrue(delta_condition)
        self.assertEqual(isinstance(topo[0].op, DeepCopyOp), should_copy,
                         "Inverse functions not removed!")
Project: amdtk    Author: amdtkdev
def _log_partition_symfunc():
    natural_params = T.vector()
    size = natural_params.shape[0] // 4
    np1, np2, np3, np4 = T.split(natural_params, 4 * [size], 4)

    log_Z = T.sum(T.gammaln(.5 * (np4 + 1)))
    log_Z += T.sum(- .5 * (np4 + 1) * T.log(.5 * (np1 - (np2 ** 2) / np3)))
    log_Z += T.sum(-.5 * T.log(np3))

    func = theano.function([natural_params], log_Z)
    grad_func = theano.function([natural_params],
                                T.grad(T.sum(log_Z), natural_params))
    return func, grad_func
Project: amdtk    Author: iondel
def _log_partition_symfunc():
    natural_params = T.vector()
    size = natural_params.shape[0] // 4
    np1, np2, np3, np4 = T.split(natural_params, 4 * [size], 4)

    log_Z = T.sum(T.gammaln(.5 * (np4 + 1)))
    log_Z += T.sum(- .5 * (np4 + 1) * T.log(.5 * (np1 - (np2 ** 2) / np3)))
    log_Z += T.sum(-.5 * T.log(np3))

    func = theano.function([natural_params], log_Z)
    grad_func = theano.function([natural_params],
                                T.grad(T.sum(log_Z), natural_params))
    return func, grad_func
Project: Theano-Deep-learning    Author: GeekLiB
def test_local_gpu_split():
    """ Test that the GpuSplit op is being applied and works """
    # Construct symbolic split
    x = tensor.fvector()
    splits = tensor.lvector()
    ra, rb, rc = tensor.split(x, splits, n_splits=3, axis=0)
    # Compile function to use CPU
    f = theano.function([x, splits], [ra, rb, rc], mode=mode_without_gpu)
    # Get values for CPU version
    cpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    # Ensure that one op is theano.tensor.Split
    assert any([isinstance(o.op, theano.tensor.Split) for o in l])
    # GPU version
    f = theano.function([x, splits], [ra, rb, rc], mode=mode_with_gpu)
    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])

    # Test the other path of the optimizer, when it is the output that
    # is moved to the GPU.
    ra = cuda.gpu_from_host(ra)
    f = theano.function([x, splits], [ra, rb, rc],
                        mode=mode_with_gpu.excluding("InputToGpuOptimizer"))
    gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
    l = f.maker.fgraph.toposort()
    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])

    # Test that split with only 1 output work
    ra = tensor.split(x, splits, n_splits=1, axis=0)
    f = theano.function([x, splits], [ra], mode=mode_without_gpu)
    cpu_res = f([0, 1, 2, 3, 4, 5], [6])
    l = f.maker.fgraph.toposort()
    # Ensure that no op is theano.tensor.Split or GpuSplit, they get
    # optimized away.
    assert not any([isinstance(o.op, (theano.tensor.Split,
                                      cuda.GpuSplit)) for o in l])
    # GPU version
    f = theano.function([x, splits], [ra], mode=mode_with_gpu)
    gpu_res = f([0, 1, 2, 3, 4, 5], [6])
    l = f.maker.fgraph.toposort()
    assert not any([isinstance(o.op, (theano.tensor.Split,
                                      cuda.GpuSplit)) for o in l])
    # Check equality
    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])