Python theano.tensor.nnet module: conv2d() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.nnet.conv2d().
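As a quick orientation before the project snippets, here is a minimal, self-contained sketch of the basic call; the variable names and array shapes are illustrative assumptions, not taken from any of the projects below. conv2d expects a 4D input tensor (batch, channels, rows, cols) and a 4D filter tensor (n_filters, channels, filter_rows, filter_cols).

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

# Symbolic 4D input: (batch, channels, rows, cols)
x = T.tensor4('x')
# Filter bank: (n_filters, channels, filter_rows, filter_cols)
W = theano.shared(
    np.random.randn(8, 3, 5, 5).astype(theano.config.floatX), name='W')

# 'valid' border mode, unit stride; filter_flip=True performs true convolution
y = conv2d(x, W, border_mode='valid', subsample=(1, 1), filter_flip=True)

f = theano.function([x], y)
out = f(np.random.randn(2, 3, 32, 32).astype(theano.config.floatX))
print(out.shape)  # (2, 8, 28, 28) with 'valid' mode and 5x5 filters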

Project: reseg | Author: fvisin
def local_mean_subtraction(input, kernel_size=5):

    input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
    input = input.reshape(input_shape).astype(floatX)

    X = T.tensor4(dtype=floatX)
    filter_shape = (1, 1, kernel_size, kernel_size)
    filters = mean_filter(kernel_size).reshape(filter_shape)
    filters = shared(_asarray(filters, dtype=floatX), borrow=True)

    mean = conv2d(input=X,
                  filters=filters,
                  input_shape=input.shape,
                  filter_shape=filter_shape,
                  border_mode='half')
    new_X = X - mean
    f = function([X], new_X)
    return f(input)
Project: python-machine-learning | Author: sho-87
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        # Reshape the input to the 4D image shape expected by conv2d
        self.inpt = inpt.reshape(self.image_shape)

        # Do convolution
        self.conv_out = conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            input_shape=self.image_shape, border_mode=self.border_mode,
            subsample=self.stride)

        # Get the feature maps for this layer
        self.feature_maps = theano.function([self.inpt], self.conv_out)

        # Apply bias and activation and set as output
        self.output = self.activation_fn(
            self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in convolutional layers
Project: python-machine-learning | Author: sho-87
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        # Reshape the input to the 4D image shape expected by conv2d
        self.inpt = inpt.reshape(self.image_shape)

        # Do convolution
        self.conv_out = conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            input_shape=self.image_shape, border_mode=self.border_mode,
            subsample=self.stride)

        # Get the feature maps for this layer
        self.feature_maps = theano.function([self.inpt], self.conv_out)

        # Max pooling
        pooled_out = pool.pool_2d(input=self.conv_out, ds=self.poolsize,
                                  ignore_border=True, mode='max')

        # Apply bias and activation and set as output
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in convolutional layers
Project: lazyprogrammer | Author: inhwane
def convpool(X, W, b, poolsize=(2, 2)):
    conv_out = conv2d(input=X, filters=W)

    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map
    # width & height
    # return T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
    return relu(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
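The bias-broadcasting comment in convpool above can be checked in isolation. A minimal sketch, where the bias length (4) and the input shape are illustrative assumptions:

import numpy as np
import theano
import theano.tensor as T

b = theano.shared(np.arange(4, dtype=theano.config.floatX), name='b')  # shape (4,)
x = T.tensor4('x')                                                     # (batch, 4, H, W)
# dimshuffle('x', 0, 'x', 'x') turns (4,) into (1, 4, 1, 1), so the bias
# broadcasts over the batch and the spatial dimensions.
y = x + b.dimshuffle('x', 0, 'x', 'x')
f = theano.function([x], y)
print(f(np.zeros((2, 4, 3, 3), dtype=theano.config.floatX))[0, :, 0, 0])  # [0. 1. 2. 3.]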
Project: IQA_BIECON_release | Author: jongyookim
def get_output(self, input, **kwargs):
        var_shape = kwargs.get('var_shape', False)
        if var_shape:
            input_shape = None
        else:
            input_shape = self.input_shape
        lin_output = conv2d(
            input=input,
            filters=self.W,
            filter_shape=self.filter_shape,
            border_mode=self.mode,
            subsample=self.subsample,
            input_shape=input_shape
        )

        if self.batch_norm:
            lin_output = self.bn_layer.get_output(lin_output)
        elif not self.no_bias:
            lin_output += self.b.dimshuffle('x', 0, 'x', 'x')

        return self.activation(lin_output)
Project: theano-mc-cnn | Author: epiception
def model(X, w1, w2, w3, w4):

    l1 = relu((conv2d(X,w1, border_mode='full')))
    l2 = relu((conv2d(l1,w2, border_mode='valid')))
    l3 = relu((conv2d(l2,w3,border_mode='full')))
    l4 = conv2d(l3,w4,border_mode='valid')

    output = l2_norm_layer(l4)

    return output
Project: vaegan | Author: anitan0925
def conv( x, w, b=None ):
    s = int(np.floor(w.get_value().shape[-1]/2.))
    z = conv2d(x, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')

    return z
Project: LeNet5 | Author: LukaszObara
def __init__(self, input, filter_shape, image_shape, padding=(0, 0), 
                 stride=(1, 1), activation_fn=None, seed=3235):

        assert image_shape[1] == filter_shape[1]

        # rng = np.random.RandomState(seed)

        self.input = input
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.activation_fn = activation_fn

        fan_in = np.prod(filter_shape[1:])
        fan_out = filter_shape[0]*np.prod(filter_shape[2:]) // 2
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        w = np.random.uniform(low=-W_bound, high=W_bound, size=filter_shape)
        b_vals = np.random.uniform(size=filter_shape[0])

        # Initialize weights with random values
        self.W = theano.shared(name='weights',
                               value=w.astype(theano.config.floatX),
                               borrow=True)
        self.b = theano.shared(name='bias',
                               value=b_vals.astype(theano.config.floatX), 
                               borrow=True)

        conv_out = conv2d(input=input, filters=self.W, border_mode=padding,
                          subsample=stride, filter_shape=filter_shape, 
                          input_shape=image_shape)

        l_output = conv_out + self.b.dimshuffle(('x', 0, 'x', 'x'))
        self.output = (l_output if activation_fn is None 
                       else activation_fn(l_output))

        # Parameters of the model
        self.params = [self.W, self.b]
Project: DBQA | Author: nanfeng1101
def __call__(self, q_input, a_input, *args, **kwargs):
        # convolve input feature maps with filters
        q_conv_out = conv2d(
            input=q_input,
            filters=self.W,
            filter_shape=self.filter_shape
        )
        a_conv_out = conv2d(
            input=a_input,
            filters=self.W,
            filter_shape=self.filter_shape
        )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        if self.non_linear == "tanh":
            q_conv_out_tanh = Tanh(q_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_tanh = Tanh(a_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_tanh, ws=self.pool_size, ignore_border=True) # max
            a_output = pool.pool_2d(input=a_conv_out_tanh, ws=self.pool_size, ignore_border=True)
        elif self.non_linear == "relu":
            q_conv_out_relu = ReLU(q_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_relu = ReLU(a_conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_relu, ws=self.pool_size, ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out_relu, ws=self.pool_size, ignore_border=True)
        else:
            q_output = pool.pool_2d(input=q_conv_out, ws=self.pool_size, ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out, ws=self.pool_size, ignore_border=True)

        return q_output, a_output
Project: insuranceQA-cnn-lstm | Author: cszhz
def _cnn_net(self, tparams, cnn_input, batch_size, sequence_len, num_filters, filter_sizes, proj_size):
    outputs = []
    for filter_size in filter_sizes:
        filter_shape = (num_filters, 1, filter_size, proj_size)
        image_shape = (batch_size, 1, sequence_len, proj_size)
        W = tparams['cnn_W_' + str(filter_size)]
        b = tparams['cnn_b_' + str(filter_size)]
        conv_out = conv2d(input=cnn_input, filters=W, filter_shape=filter_shape, input_shape=image_shape)
        pooled_out = pool.pool_2d(input=conv_out, ds=(sequence_len - filter_size + 1, 1), ignore_border=True, mode='max')
        pooled_active = T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
        outputs.append(pooled_active)
    num_filters_total = num_filters * len(filter_sizes)
    output_tensor = T.reshape(T.concatenate(outputs, axis=1), [batch_size, num_filters_total])
    return output_tensor
Project: sca-cnn | Author: zjuchenlong
def res5b_2_res5c_branch2a(res5b_res5b_relu_0_split_0, conv_params, bn_params):
    assert res5b_res5b_relu_0_split_0.ndim == 4
    res5c_branch2a = conv2d(input=res5b_res5b_relu_0_split_0, filters=conv_params['res5c_branch2a_0'], border_mode='valid', filter_flip=False)
    bn5c_branch2a = (res5c_branch2a - bn_params['bn5c_branch2a_0'].dimshuffle('x', 0, 'x', 'x')) / tensor.sqrt(bn_params['bn5c_branch2a_1'].dimshuffle('x', 0, 'x', 'x') + numpy.float32(1e-5))
    scale5c_branch2a = bn5c_branch2a * bn_params['scale5c_branch2a_0'].dimshuffle('x', 0, 'x', 'x') + bn_params['scale5c_branch2a_1'].dimshuffle('x', 0, 'x', 'x')
    res5c_branch2a_relu = tensor.nnet.relu(scale5c_branch2a, alpha=0.0)

    return res5c_branch2a_relu
Project: sca-cnn | Author: zjuchenlong
def res5c_branch2a_2_res5c_branch2b(res5c_branch2a_relu, conv_params, bn_params):
    res5c_branch2b = conv2d(input=res5c_branch2a_relu, filters=conv_params['res5c_branch2b_0'], border_mode='half', filter_flip=False)
    bn5c_branch2b = (res5c_branch2b - bn_params['bn5c_branch2b_0'].dimshuffle('x', 0, 'x', 'x')) / tensor.sqrt(bn_params['bn5c_branch2b_1'].dimshuffle('x', 0, 'x', 'x') + numpy.float32(1e-5))
    scale5c_branch2b = bn5c_branch2b * bn_params['scale5c_branch2b_0'].dimshuffle('x', 0, 'x', 'x') + bn_params['scale5c_branch2b_1'].dimshuffle('x', 0, 'x', 'x')
    res5c_branch2b_relu = tensor.nnet.relu(scale5c_branch2b, alpha=0.0)

    return res5c_branch2b_relu
Project: sca-cnn | Author: zjuchenlong
def res5c_branch2b_2_res5c_branch2c(res5c_branch2b_relu, conv_params, bn_params):
    res5c_branch2c = conv2d(input=res5c_branch2b_relu, filters=conv_params['res5c_branch2c_0'], border_mode='valid', filter_flip=False)
    bn5c_branch2c = (res5c_branch2c - bn_params['bn5c_branch2c_0'].dimshuffle('x', 0, 'x', 'x')) / tensor.sqrt(bn_params['bn5c_branch2c_1'].dimshuffle('x', 0, 'x', 'x') + numpy.float32(1e-5))
    scale5c_branch2c = bn5c_branch2c * bn_params['scale5c_branch2c_0'].dimshuffle('x', 0, 'x', 'x') + bn_params['scale5c_branch2c_1'].dimshuffle('x', 0, 'x', 'x')

    return scale5c_branch2c
Project: crikey | Author: kastnerkyle
def conv2d(input, filters, biases=None, border_mode=0, stride=(1, 1)):
    """
    Light wrapper around conv2d - optionally handle biases
    """
    r = nnet.conv2d(
            input=input,
            filters=filters,
            border_mode=border_mode,
            subsample=stride,
            filter_flip=True)
    if biases is None:
        return r
    else:
        return r + biases.dimshuffle('x', 0, 'x', 'x')
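A possible way to call this wrapper, assuming it is in scope together with an earlier "from theano.tensor import nnet"; the shapes and the 'half' border mode below are illustrative assumptions:

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')   # (batch, channels, rows, cols)
W = theano.shared(np.random.randn(16, 3, 3, 3).astype(theano.config.floatX), name='W')
b = theano.shared(np.zeros(16, dtype=theano.config.floatX), name='b')

# border_mode='half' keeps the spatial size for odd filter widths; the wrapper
# adds the bias with the usual ('x', 0, 'x', 'x') broadcast.
out = conv2d(X, W, biases=b, border_mode='half', stride=(1, 1))
f = theano.function([X], out)
print(f(np.random.randn(2, 3, 8, 8).astype(theano.config.floatX)).shape)  # (2, 16, 8, 8)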
Project: dl4mt-c2c | Author: nyu-dl
def conv_encoder(tparams, state_below, options, prefix='conv_enc',
          one_step=False, init_state=None, width=None, nkernels=None, pool_window=None, pool_stride=None, **kwargs):
    # state_below : maxlen X n_samples X dim_word_src
    # mask : maxlen X n_samples
    # data = (n_samples, dim, maxlen, 1)
    # kernel = (nkernels, dim, width, 1)

    maxlen = state_below.shape[0]
    n_samples = state_below.shape[1]
    dim = state_below.shape[2]

    data = state_below.dimshuffle(1,2,0,'x')
    # data : n_samples X dim X maxlen X 1

    W = tparams[_p(prefix, 'convW')]
    b = tparams[_p(prefix, 'convB')]

    #conv_out = dnn_conv(data, W, border_mode='valid', subsample=(stride,1), precision='float32')
    output = dnn_conv(data, W, border_mode='half', precision='float32')
    #conv_out = conv2d(data, W, border_mode='valid')
    #conv_out = conv2d(data, W, input_shape=(8, 256, 450, 1), filter_shape=(64, 1, 4, 1), border_mode='valid')

    if width % 2 == 0:
        # an even filter width with 'half' convolution yields one extra step along the time axis
        output = output[:,:,:-1,:]

    output = tensor.nnet.relu(output + b.dimshuffle('x',0,'x','x'))

    output = dnn_pool(output, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))

    #output = tensor.nnet.sigmoid(conv_out)
    # output : n_samples X nkernels X (maxlen-width+1) X 1

    #output = output.dimshuffle(2,0,1,3).squeeze()
    output = output.dimshuffle(2,0,1,3)[:,:,:,0]
    # NOTE : when we pass 1 or 2 instead of 0, get IndexError: index out of bounds
    # not sure why squeeze wouldn't work though

    # output : (maxlen-width+1) X n_samples X nkernels 

    return output
    # emb : maxlen X n_samples X dim_word_src
Project: dl4mt-c2c | Author: nyu-dl
def multi_scale_conv_encoder(tparams, state_below, options, prefix='conv_enc',
              one_step=False, init_state=None, width=None, nkernels=None, pool_window=None, pool_stride=None, **kwargs):
    # state_below.shape = (maxlen_x_pad + 2*pool_stride, n_samples, dim_word_src)
    # mask.shape = (maxlen_x_pad/pool_stride, n_samples)
    assert len(width) == len(nkernels)

    data = state_below.dimshuffle(1,2,0,'x')
    # data.shape = (n_samples, dim_word_src, maxlen_x_pad + 2*pool_stride, 1)

    W = [tparams[_p(prefix, 'convW')+str(idx)] for idx in range(len(width))]
    b = [tparams[_p(prefix, 'convB')+str(idx)] for idx in range(len(width))]

    output = []

    for idx in range(len(width)):
        curr_width = width[idx]

        output.append(dnn_conv(data, W[idx], border_mode='half', precision='float32'))
        # output[idx].shape = (n_samples, nkernels[idx], (maxlen_x_pad + 2*pool_stride), 1)

        if curr_width % 2 == 0:
            output[idx] = (output[idx])[:,:,:-1,:] # for filters with an even numbered width, half convolution yields an output whose length is 1 longer than the input, hence discarding the last one here. For more detail, consult http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv2d

        output[idx] = tensor.nnet.relu(output[idx] + b[idx].dimshuffle('x',0,'x','x'))

    result = tensor.concatenate(output, axis=1)
    # result.shape = (n_samples, sum(nkernels), (maxlen_x_pad + 2*pool_stride), 1)

    result = dnn_pool(result, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))
    # result.shape = (n_samples, sum(nkernels), (maxlen_x_pad/pool_stride + 2), 1)

    result = result.dimshuffle(2,0,1,3)[1:-1,:,:,0]
    # We get rid of the first and the last result and shuffle.
    # result.shape = (maxlen_x_pad/pool_stride, n_samples, sum(nkernels))

    return result
Project: structured-output-ae | Author: sbelharbi
def __init__(self, rng, input, filter_shape, image_shape, activation,
                 padding, W=None, b=None, b_v=0., stride=(1, 1)):
        """Implement a convolution layer. No pooling."""
        assert image_shape[1] == filter_shape[1]
        self.input = input
        self.x = input
        print(filter_shape, "***********")
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        if rng is None:
            rng = numpy.random.RandomState(23455)
        if W is None:
            W = theano.shared(
                numpy.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                    dtype=theano.config.floatX
                ),
                name="w_conv",
                borrow=True
            )
        if b is None:
            b_v = (
                numpy.ones(
                    (filter_shape[0],)) * b_v).astype(theano.config.floatX)
            b = theano.shared(value=b_v, name="b_conv", borrow=True)

        self.W = W
        self.b = b
        conv_out = conv2d(
            input=self.x,
            filters=self.W,
            input_shape=image_shape,
            filter_shape=filter_shape,
            border_mode=padding,
            subsample=stride
        )
        linear = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        if activation is not None:
            self.output = activation(linear)
        else:
            self.output = linear
        self.params = [self.W, self.b]
Project: reseg | Author: fvisin
def lecun_lcn(input, kernel_size=9, threshold=1e-4, use_divisor=False):
    """
    Yann LeCun's local contrast normalization
    Original code in Theano by: Guillaume Desjardins

    :param input:
    :param kernel_size:
    :param threshold:
    :param use_divisor:
    :return:
    """
    input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
    input = input.reshape(input_shape).astype(floatX)

    X = T.tensor4(dtype=floatX)
    filter_shape = (1, 1, kernel_size, kernel_size)
    filters = gaussian_filter(kernel_size).reshape(filter_shape)
    filters = shared(_asarray(filters, dtype=floatX), borrow=True)

    convout = conv2d(input=X,
                     filters=filters,
                     input_shape=input.shape,
                     filter_shape=filter_shape,
                     border_mode='half')
    new_X = X - convout

    if use_divisor:
        # Scale down norm of kernel_size x kernel_size patch
        sum_sqr_XX = conv2d(input=T.sqr(T.abs_(new_X)),
                            filters=filters,
                            input_shape=input.shape,
                            filter_shape=filter_shape,
                            border_mode='half')

        denom = T.sqrt(sum_sqr_XX)
        per_img_mean = denom.mean(axis=[2, 3])
        divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
        divisor = T.maximum(divisor, threshold)
        new_X = new_X / divisor

    new_X = new_X.dimshuffle(0, 2, 3, 1)
    new_X = new_X.flatten(ndim=3)
    f = function([X], new_X)
    return f(input)
Project: Theano-NN_Starter | Author: nightinwhite
def get_output(self):
        if self.dropout_rate!=0:
            seed = np.random.randint(10e6)
            rng = RandomStreams(seed=seed)
            retain_prob = 1. - self.dropout_rate
            self.input *= rng.binomial(self.input.shape, p=retain_prob, dtype=self.input.dtype)
            self.input /= retain_prob
        conv_out = conv2d(self.input, self.Cnn_W) #(batch size, output channels, output rows, output columns)
        conv_out = conv_out + self.Cnn_B.dimshuffle('x', 0, 'x', 'x')
        # out_put_shape = self.get_output_shape()
        # r_matrix_s = np.eye(out_put_shape[3], out_put_shape[3], 0)
        # r_matrix_x = np.eye(out_put_shape[3], out_put_shape[3], -1)
        # test = [[r_matrix_s for i in range(self.input_shape[1])] for j in range(self.input_shape[0])]
        # print test
        # r_matrix_s = theano.shared(np.array(r_matrix_s).astype(np.float32))
        #
        # r_matrix_x = theano.shared(np.array(r_matrix_x).astype(np.float32))
        #
        # r_matrix = r_matrix_s*self.Rnn_W_s.dimshuffle(0, 'x', 'x') + \
        #             r_matrix_x*(1-self.Rnn_W_s).dimshuffle(0, 'x', 'x')
        # conv_out = conv_out.dimshuffle(1, 0, 2, 3)
        # def step (con, r_m, r_b):
        #     return T.dot(con, r_m) + r_b
        # conv_out, _ = theano.scan(step, sequences=[conv_out, r_matrix, self.Rnn_W_b])
        # conv_out = conv_out.dimshuffle(1, 0, 2, 3)
        # R_conv_out = T.concatenate([T.zeros_like(conv_out[:, :, :, :1]), conv_out], axis = 3)
        # R_conv_out = R_conv_out[:, :, :,:conv_out.shape[3]]
        # RNN_Ws = self.Rnn_W_s.dimshuffle('x', 0, 'x', 'x')
        # RNN_b = self.Rnn_W_b
        # R_conv_out = R_conv_out *RNN_Ws + conv_out * (1-RNN_Ws) + RNN_b
        # conv_out = conv_out.dimshuffle(1,0,2,3)
        #
        # def Rnn_add(channel,RNN_b,RNN_Ws,RNN_Wx):
        #     RNN_channel = T.concatenate([T.zeros_like(channel[:, :, :1]),channel],axis = 2)
        #     RNN_channel = RNN_channel[:,:,:channel.shape[2]]
        #     res = RNN_channel*RNN_Ws + channel*RNN_Wx + RNN_b
        #     return res
        #self.Rnn_W_s = T.abs_(self.Rnn_W_s)
        # R_conv_out,_ = theano.scan(Rnn_add,sequences= [conv_out,self.Rnn_W_b,self.Rnn_W_s,1 - self.Rnn_W_s])
        # R_conv_out = R_conv_out.dimshuffle(1,0,2,3)
        #output = self.activition(R_conv_out)
        #return self.input
        return self.activition(conv_out)
        #return output
Project: Theano-Deep-learning | Author: GeekLiB
def test_conv(self):
        for conv_op in [conv.conv2d, conv2d]:
            for border_mode in ['valid', 'full']:
                image_shape = (2, 2, 4, 5)
                filter_shape = (2, 2, 2, 3)
                image_dim = len(image_shape)
                filter_dim = len(filter_shape)
                input = tensor.TensorType(
                    theano.config.floatX,
                    [False] * image_dim)(name='input')
                filters = tensor.TensorType(
                    theano.config.floatX,
                    [False] * filter_dim)(name='filter')
                ev_input = tensor.TensorType(
                    theano.config.floatX,
                    [False] * image_dim)(name='ev_input')
                ev_filters = tensor.TensorType(
                    theano.config.floatX,
                    [False] * filter_dim)(name='ev_filters')

                def sym_conv2d(input, filters):
                    return conv_op(input, filters, border_mode=border_mode)
                output = sym_conv2d(input, filters).flatten()
                yv = tensor.Rop(output, [input, filters], [ev_input, ev_filters])
                mode = None
                if theano.config.mode == "FAST_COMPILE":
                    mode = "FAST_RUN"
                rop_f = function([input, filters, ev_input, ev_filters],
                                 yv, on_unused_input='ignore', mode=mode)
                sy, _ = theano.scan(lambda i, y, x1, x2, v1, v2:
                                    (tensor.grad(y[i], x1) * v1).sum() +
                                    (tensor.grad(y[i], x2) * v2).sum(),
                                    sequences=tensor.arange(output.shape[0]),
                                    non_sequences=[output, input, filters,
                                                   ev_input, ev_filters],
                                    mode=mode)
                scan_f = function([input, filters, ev_input, ev_filters], sy,
                                  on_unused_input='ignore', mode=mode)
                dtype = theano.config.floatX
                image_data = numpy.random.random(image_shape).astype(dtype)
                filter_data = numpy.random.random(filter_shape).astype(dtype)
                ev_image_data = numpy.random.random(image_shape).astype(dtype)
                ev_filter_data = numpy.random.random(filter_shape).astype(dtype)
                v1 = rop_f(image_data, filter_data, ev_image_data, ev_filter_data)
                v2 = scan_f(image_data, filter_data, ev_image_data, ev_filter_data)
                assert numpy.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))