Python theano.tensor.nnet module: conv() code examples

The following 10 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.nnet.conv().
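Note that these snippets are excerpts from larger projects: they assume surrounding imports (numpy as np, theano, theano.tensor as T, lasagne) plus project-specific helpers (initialize_weight, BN_Conv_layer, activation_fn_th) that are not shown, and they reach conv() only through those wrappers. For a self-contained point of reference, here is a minimal sketch of calling theano.tensor.nnet.conv.conv2d directly; all shapes and values are illustrative assumptions, not taken from the projects below. (This legacy conv module was later deprecated in favor of theano.tensor.nnet.conv2d.)

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

rng = np.random.RandomState(1234)

x = T.tensor4('x')       # input: (batch, channels, height, width)
w_shape = (8, 1, 5, 5)   # filters: (n_filters, in_channels, height, width)
W = theano.shared(rng.uniform(-0.1, 0.1, w_shape).astype(theano.config.floatX),
                  name='W')

# 'valid' convolution: each spatial dimension shrinks by filter_size - 1
out = conv.conv2d(x, W, filter_shape=w_shape, border_mode='valid')
f = theano.function([x], out)

img = rng.rand(2, 1, 28, 28).astype(theano.config.floatX)
print(f(img).shape)      # (2, 8, 24, 24)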

Project: GRAN    Author: jiwoongim    | project source | file source
def __init__(self, model_params, nkerns=[1,8,4], ckern=10, filter_sizes=[5,5,5,7]):

        """Initializes the architecture of the discriminator"""

        self.num_hid, num_dims, num_class, self.batch_size, self.num_channels = model_params
        self.D = int(np.sqrt(num_dims / self.num_channels))
        numpy_rng = np.random.RandomState(1234)

        self.nkerns = np.asarray(nkerns) * ckern  # ckern: base number of gen filters (scales every conv layer)
        self.nkerns[0] = self.num_channels
        self.filter_sizes = filter_sizes

        num_convH = self.nkerns[-1]*filter_sizes[-1]*filter_sizes[-1]
        self.W      = initialize_weight(num_convH,  self.num_hid,  'W', numpy_rng, 'uniform') 
        self.hbias  = theano.shared(np.zeros((self.num_hid,), dtype=theano.config.floatX), name='hbias_enc')       
        self.W_y    = initialize_weight(self.num_hid, num_class,  'W_y', numpy_rng, 'uniform') 

        self.L1 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[0], bnkern=self.nkerns[1] , bfilter_sz=filter_sizes[0], tfilter_sz=filter_sizes[1])
        self.L2 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[1], bnkern=self.nkerns[2] , bfilter_sz=filter_sizes[1], tfilter_sz=filter_sizes[2])

        self.num_classes = num_class
        self.params = [self.W_y, self.W, self.hbias] + self.L1.params + self.L2.params
Project: GRAN    Author: jiwoongim    | project source | file source
def propagate(self, X, num_train=None, atype='relu'):
        """Propagate X through the discriminator; return the fake/real prediction"""

        image_shape0 = [X.shape[0], self.num_channels, self.D, self.D]
        ConX = X.reshape(image_shape0)
        H0 = self.L1.conv(ConX, atype=atype)
        H1 = self.L2.conv(H0, atype=atype)
        H2 = self.L3.conv(H1, atype=atype) 
        H3 = self.L4.conv(H2, atype=atype) 
        H4 = self.L5.conv(H3, atype=atype) 
        H4 = H4.flatten(2)

        H5 = activation_fn_th(T.dot(H4, self.W) + self.hbias, atype='tanh')
        y  = T.nnet.sigmoid(T.dot(H5, self.W_y))    

        return y
Project: GRAN    Author: jiwoongim    | project source | file source
def __init__(self, model_params, nkerns=[1,8,4,2,1], ckern=172, filter_sizes=[5,5,5,5,4]):
        """Initializes the architecture of the discriminator"""

        self.num_hid, num_dims, num_class, self.batch_size, self.num_channels = model_params
        self.D = int(np.sqrt(num_dims / self.num_channels))
        numpy_rng = np.random.RandomState(1234)

        self.nkerns = np.asarray(nkerns) * ckern  # ckern: base number of gen filters (scales every conv layer)
        self.nkerns[0] = self.num_channels
        self.filter_sizes = filter_sizes

        num_convH = self.nkerns[-1]*filter_sizes[-1]*filter_sizes[-1]

        self.W      = initialize_weight(num_convH,  self.num_hid,  'W', numpy_rng, 'uniform') 
        self.hbias  = theano.shared(np.zeros((self.num_hid,), dtype=theano.config.floatX), name='hbias')       
        self.W_y    = initialize_weight(self.num_hid, num_class,  'W_y', numpy_rng, 'uniform') 

        self.L1 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[0], bnkern=self.nkerns[1] , bfilter_sz=filter_sizes[1], tfilter_sz=filter_sizes[0])
        self.L2 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[1], bnkern=self.nkerns[2] , bfilter_sz=filter_sizes[2], tfilter_sz=filter_sizes[1])
        self.L3 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[2], bnkern=self.nkerns[3] , bfilter_sz=filter_sizes[3], tfilter_sz=filter_sizes[2])
        self.L4 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[3], bnkern=self.nkerns[4] , bfilter_sz=filter_sizes[4], tfilter_sz=filter_sizes[3])

        self.num_classes = num_class
        self.params = [self.W_y, self.W] \
                + self.L1.params + self.L2.params + self.L3.params + self.L4.params
Project: GRAN    Author: jiwoongim    | project source | file source
def __init__(self, model_params, nkerns=[1,8,4,2], ckern=128, filter_sizes=[5,5,5,5,4]):
        """Initializes the architecture of the discriminator"""

        self.num_hid, num_dims, num_class, self.batch_size, self.num_channels = model_params
        self.D      = int(np.sqrt(num_dims / self.num_channels))
        numpy_rng   = np.random.RandomState(1234)

        self.nkerns         = np.asarray(nkerns) * ckern  # ckern: base number of gen filters (scales every conv layer)
        self.nkerns[0]      = self.num_channels
        self.filter_sizes   = filter_sizes
        num_convH           = self.nkerns[-1]*filter_sizes[-1]*filter_sizes[-1]

        self.W      = initialize_weight(num_convH,  self.num_hid,  'W', numpy_rng, 'uniform') 
        self.hbias  = theano.shared(np.zeros((self.num_hid,), dtype=theano.config.floatX), name='hbias_enc')       
        self.W_y    = initialize_weight(self.num_hid, num_class,  'W_y', numpy_rng, 'uniform') 

        self.L1 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[0], bnkern=self.nkerns[1] , bfilter_sz=filter_sizes[0], tfilter_sz=filter_sizes[1])
        self.L2 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[1], bnkern=self.nkerns[2] , bfilter_sz=filter_sizes[1], tfilter_sz=filter_sizes[2])
        self.L3 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[2], bnkern=self.nkerns[3] , bfilter_sz=filter_sizes[2], tfilter_sz=filter_sizes[3])

        self.num_classes = num_class
        self.params = [self.W_y, self.W, self.hbias] + self.L1.params + self.L2.params + self.L3.params
Project: GRAN    Author: jiwoongim    | project source | file source
def propagate(self, X, num_train=None, atype='relu'):
        """Propagate X through the discriminator; return the fake/real prediction"""
        image_shape0 = [X.shape[0], self.num_channels, self.D, self.D]
        ConX = X.reshape(image_shape0)
        H0 = self.L1.conv(ConX, atype=atype)
        H1 = self.L2.conv(H0, atype=atype)
        H1 = H1.flatten(2)

        H2 = activation_fn_th(T.dot(H1, self.W) + self.hbias, atype='tanh')
        y  = T.nnet.sigmoid(T.dot(H2, self.W_y))    

        return y
Project: GRAN    Author: jiwoongim    | project source | file source
def __init__(self, model_params, nkerns=[1,8,4,2,1,1], ckern=128*3, filter_sizes=[5,5,5,5,5,4]):
        """Initializes the architecture of the discriminator"""

        self.num_hid, num_dims, num_class, self.batch_size, self.num_channels = model_params
        self.D = int(np.sqrt(num_dims / self.num_channels))
        numpy_rng = np.random.RandomState(1234)

        self.nkerns = np.asarray(nkerns) * ckern  # ckern: base number of gen filters (scales every conv layer)
        self.nkerns[0] = self.num_channels
        self.filter_sizes = filter_sizes

        num_convH = self.nkerns[-1]*filter_sizes[-1]*filter_sizes[-1]

        self.W      = initialize_weight(num_convH,  self.num_hid,  'W', numpy_rng, 'uniform') 
        self.hbias  = theano.shared(np.zeros((self.num_hid,), dtype=theano.config.floatX), name='hbias')       
        self.W_y    = initialize_weight(self.num_hid, num_class,  'W_y', numpy_rng, 'uniform') 

        self.L1 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[0], bnkern=self.nkerns[1] , bfilter_sz=filter_sizes[1], tfilter_sz=filter_sizes[0])
        self.L2 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[1], bnkern=self.nkerns[2] , bfilter_sz=filter_sizes[2], tfilter_sz=filter_sizes[1])
        self.L3 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[2], bnkern=self.nkerns[3] , bfilter_sz=filter_sizes[3], tfilter_sz=filter_sizes[2])
        self.L4 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[3], bnkern=self.nkerns[4] , bfilter_sz=filter_sizes[4], tfilter_sz=filter_sizes[3])
        self.L5 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[4], bnkern=self.nkerns[5] , bfilter_sz=filter_sizes[5], tfilter_sz=filter_sizes[4])

        self.num_classes = num_class
        self.params = [self.W_y, self.W] \
                + self.L1.params + self.L2.params + self.L3.params + self.L4.params + self.L5.params
Project: GRAN    Author: jiwoongim    | project source | file source
def propagate(self, X, num_train=None, atype='relu'):
        """Propagate X through the discriminator; return the fake/real prediction"""
        image_shape0 = [X.shape[0], self.num_channels, self.D, self.D]
        ConX = X.reshape(image_shape0)
        H0 = self.L1.conv(ConX, atype=atype)
        H1 = self.L2.conv(H0, atype=atype)
        H2 = self.L3.conv(H1, atype=atype) 
        H2 = H2.flatten(2)

        H3 = activation_fn_th(T.dot(H2, self.W) + self.hbias, atype='tanh')
        y  = T.nnet.sigmoid(T.dot(H3, self.W_y))    

        return y
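BN_Conv_layer, used throughout the GRAN snippets above, is a project helper whose implementation is not shown here. Purely for orientation, here is a plausible minimal sketch of the conv + batch-norm + activation pattern such a layer performs, written in raw Theano; this is an illustrative reconstruction under stated assumptions, not the project's actual code.

import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet.bn import batch_normalization

def bn_conv(x, W, gamma, beta, filter_shape, eps=1e-4):
    # hypothetical layer: convolve, normalize each feature map over
    # batch and spatial axes, then apply a ReLU nonlinearity
    h = conv.conv2d(x, W, filter_shape=filter_shape, border_mode='valid')
    mean = h.mean(axis=(0, 2, 3), keepdims=True)
    std = T.sqrt(h.var(axis=(0, 2, 3), keepdims=True) + eps)
    h = batch_normalization(h,
                            gamma.dimshuffle('x', 0, 'x', 'x'),
                            beta.dimshuffle('x', 0, 'x', 'x'),
                            mean, std)
    return T.nnet.relu(h)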
Project: a3c    Author: hercky    | project source | file source
def build_shared_network(self):
        """
        This part contains trhe sharred params (conv layer) for both policy and value networks

        Returns the shared output
        """
        #from lasagne.layers import Conv2DLayer

        l_in = lasagne.layers.InputLayer(
            shape=(self.batch_size, self.history_length, self.img_height, self.img_width)
        )

        l_conv1 = lasagne.layers.Conv2DLayer(
            incoming=l_in,
            num_filters=16,
            filter_size=(8, 8),
            stride=(4, 4),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),  # lasagne's default init is GlorotUniform
            b=lasagne.init.Constant(.1)
        )

        l_conv2 = lasagne.layers.Conv2DLayer(
            incoming=l_conv1,
            num_filters=32,
            filter_size=(4, 4),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_hidden1 = lasagne.layers.DenseLayer(
            incoming=l_conv2,
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        return l_hidden1
Project: a3c    Author: hercky    | project source | file source
def build_shared_network(self):
        """
        This part contains trhe sharred params (conv layer) for both policy and value networks

        Returns the shared output
        """
        #from lasagne.layers import Conv2DLayer

        l_in = lasagne.layers.InputLayer(
            shape=(self.history_length, self.img_height, self.img_width)
        )

        l_in = lasagne.layers.ReshapeLayer(l_in, (1, self.history_length, self.img_height, self.img_width))

        l_conv1 = lasagne.layers.Conv2DLayer(
            incoming=l_in,
            num_filters=16,
            filter_size=(8, 8),
            stride=(4, 4),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),  # lasagne's default init is GlorotUniform
            b=lasagne.init.Constant(.1)
        )

        l_conv2 = lasagne.layers.Conv2DLayer(
            incoming=l_conv1,
            num_filters=32,
            filter_size=(4, 4),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_hidden1 = lasagne.layers.DenseLayer(
            incoming=l_conv2,
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        return l_hidden1
Project: a3c    Author: hercky    | project source | file source
def build_shared_network(self):
        """
        This part contains trhe sharred params (conv layer) for both policy and value networks

        Returns the shared output
        """
        #from lasagne.layers import Conv2DLayer

        l_in = lasagne.layers.InputLayer(
            shape=(self.history_length, self.img_height, self.img_width)
        )

        l_in = lasagne.layers.ReshapeLayer(l_in, (1, self.history_length, self.img_height, self.img_width))

        l_conv1 = lasagne.layers.Conv2DLayer(
            incoming=l_in,
            num_filters=16,
            filter_size=(8, 8),
            stride=(4, 4),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),  # lasagne's default init is GlorotUniform
            b=lasagne.init.Constant(.1)
        )

        l_conv2 = lasagne.layers.Conv2DLayer(
            incoming=l_conv1,
            num_filters=32,
            filter_size=(4, 4),
            stride=(2, 2),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        l_hidden1 = lasagne.layers.DenseLayer(
            incoming=l_conv2,
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.HeUniform(),
            b=lasagne.init.Constant(.1)
        )

        return l_hidden1
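The three build_shared_network variants return only the top DenseLayer. A hypothetical usage sketch (the agent object is an assumption, standing in for whatever class defines the method): the InputLayer can be recovered with lasagne.layers.get_all_layers, and the shared features compiled into a Theano function. Separate policy and value heads would then be stacked on top of l_out.

import theano
import lasagne

l_out = agent.build_shared_network()             # hypothetical agent instance
l_in = lasagne.layers.get_all_layers(l_out)[0]   # first layer is the InputLayer
features = lasagne.layers.get_output(l_out)      # symbolic shared output
shared_fn = theano.function([l_in.input_var], features)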