Python layers module: conv2d() example source code

The following 8 code examples, extracted from open-source Python projects, illustrate how to use layers.conv2d().
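The examples below chain conv2d() layers and track tensor shapes by hand, so it helps to have the output-size arithmetic nearby. A minimal helper (not part of any project below) that reproduces the 'VALID' and 'SAME' conventions these examples rely on:

import math

def conv2d_output_size(in_size, kernel, stride, padding):
    # spatial output size of a convolution (or pooling) along one axis
    if padding == 'VALID':
        return (in_size - kernel) // stride + 1
    if padding == 'SAME':
        return math.ceil(in_size / stride)
    raise ValueError('unknown padding: {}'.format(padding))

# the first numpy_cnn example: 28x28 input, 5x5 kernel, stride 2, 'VALID'
assert conv2d_output_size(28, 5, 2, 'VALID') == 12   # (BS,1,28,28) -> (BS,6,12,12)
assert conv2d_output_size(12, 3, 2, 'VALID') == 5    # (BS,6,12,12) -> (BS,10,5,5)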

Project: numpy_cnn    Author: Ryanshuai    | project source | file source
def __init__(self, learning_rate, input_shape, BS):  # input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        # conv1 : 1*28*28 -> 6*12*12
        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 6], [2, 2], 'VALID')
        self.relu_1 = ly.relu()

        # conv2 : 6*12*12 -> 10*5*5
        self.conv2d_2 = ly.conv2d([BS, 6, 12, 12], [3, 3, 6, 10], [2, 2], 'VALID')
        self.relu_2 = ly.relu()

        self.flatter = ly.flatter()

        self.full_connect_1 = ly.full_connect(250, 84)
        self.relu_3 = ly.relu()

        self.full_connect_2 = ly.full_connect(84, 10)

        self.loss_func = ly.softmax_cross_entropy_error()
Project: numpy_cnn    Author: Ryanshuai    | project source | file source
def __init__(self, learning_rate, input_shape, BS):  # input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        # conv1 : 1*28*28 -> 6*12*12 (stride [2, 2] is required here: a [1, 1]
        # stride would give 24*24, not the 12*12 that conv2 below declares)
        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 6], [2, 2], 'VALID')
        self.relu_1 = ly.relu()

        # conv2 : 6*12*12 -> 10*5*5
        self.conv2d_2 = ly.conv2d([BS, 6, 12, 12], [3, 3, 6, 10], [2, 2], 'VALID')
        self.relu_2 = ly.relu()

        self.flatter = ly.flatter()

        self.full_connect_1 = ly.full_connect(250, 84)
        self.relu_3 = ly.relu()
        self.dropout = ly.dropout(lenth=84)

        self.full_connect_2 = ly.full_connect(84, 10)

        self.loss_func = ly.softmax_cross_entropy_error()
Project: numpy_cnn    Author: Ryanshuai    | project source | file source
def __init__(self, learning_rate, input_shape, BS):  # input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        # conv1 : 1*28*28 -> 32*28*28 -> pooled 32*14*14
        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 32], [1, 1])
        self.relu_1 = ly.relu()
        self.max_pool_1 = ly.max_pooling(self.conv2d_1.output_shape, filter_shape=[2, 2], strides=[2, 2])

        # conv2 : 32*14*14 -> 64*14*14 -> pooled 64*7*7
        self.conv2d_2 = ly.conv2d(self.max_pool_1.output_shape, [5, 5, 32, 64], [1, 1])
        self.relu_2 = ly.relu()
        self.max_pool_2 = ly.max_pooling(self.conv2d_2.output_shape, filter_shape=[2, 2], strides=[2, 2])

        self.flatter = ly.flatter()

        self.full_connect_1 = ly.full_connect(input_len=7*7*64, output_len=1024)
        self.relu_3 = ly.relu()
        self.dropout_1 = ly.dropout(1024)

        self.full_connect_2 = ly.full_connect(input_len=1024, output_len=10)
        self.loss_func = ly.softmax_cross_entropy_error()
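ly.conv2d is called here without a padding argument; the declared full_connect input length of 7*7*64 only works out if the default behaves like 'SAME' (spatial size preserved at stride 1), which is an assumption about the project's defaults rather than something visible in this snippet. The arithmetic under that assumption:

import math

size = 28
for _ in range(2):                  # two conv + 2x2/2 max-pool blocks
    size = math.ceil(size / 1)      # conv, 'SAME', stride 1: size unchanged
    size = math.ceil(size / 2)      # 2x2 max pool, stride 2: size halved
assert size == 7 and size * size * 64 == 3136   # input_len of full_connect_1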
Project: numpy_cnn    Author: Ryanshuai    | project source | file source
def __init__(self, learning_rate, input_shape):  # input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        # conv1 : (BS,1,28,28) -> (BS,6,28,28) -> (BS,6,14,14)
        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 6], [1, 1], 'SAME')
        self.relu_1 = ly.relu()
        self.pool_1 = ly.max_pooling(self.conv2d_1.output_shape, [2, 2], [2, 2], 'SAME')

        # conv2 : (BS,6,14,14) -> (BS,10,14,14) -> (BS,10,7,7)
        self.conv2d_2 = ly.conv2d(self.pool_1.output_shape, [5, 5, 6, 10], [1, 1], 'SAME')
        self.relu_2 = ly.relu()
        self.pool_2 = ly.max_pooling(self.conv2d_2.output_shape, [2, 2], [2, 2], 'SAME')

        # flat:(BS,10,7,7)->(BS,490)
        self.flatter = ly.flatter()

        # fc1:(BS,490)->(BS,84)
        self.full_connect_1 = ly.full_connect(490, 84)
        self.relu_3 = ly.relu()
        self.dropout = ly.dropout(lenth=84)

        # fc2:(BS,84)->(BS,10)
        self.full_connect_2 = ly.full_connect(84, 10)

        self.loss_func = ly.softmax_cross_entropy_error()
Project: hourglasstensorlfow    Author: wbenbihi    | project source | file source
def __call__(self, inputs):
        with tf.name_scope(self.name):
            with tf.name_scope('preprocessing'):
                pad_1 = tf.pad(inputs, np.array([[0, 0], [2, 2], [2, 2], [0, 0]]))
                conv_1 = conv2d(pad_1, 64, kernel_size=6, strides=2, name='256to128')
                res_1 = residual(conv_1, 128)
                pool_1 = tf.contrib.layers.max_pool2d(res_1, [2, 2], [2, 2], padding='VALID')
                res_2 = residual(pool_1, 128)
                res_3 = residual(res_2, self.nFeat)
            # Supervision Table
            hg = [None] * self.nbStack
            ll = [None] * self.nbStack
            ll_ = [None] * self.nbStack
            drop = [None] * self.nbStack
            out = [None] * self.nbStack
            out_ = [None] * self.nbStack
            sum_ = [None] * self.nbStack
            with tf.name_scope('stacks'):
                with tf.name_scope('hourglass.1'):
                    hg[0] = self.hourglass(res_3, self.nLow, self.nFeat, 'hourglass')
                    ll[0] = convBnrelu(hg[0], self.nFeat, name='conv_1')
                    ll_[0] = conv2d(ll[0], self.nFeat, 1, 1, 'VALID', 'll')
                    drop[0] = tf.layers.dropout(ll_[0], rate=0.1, training=self.train)
                    out[0] = conv2d(ll[0], self.outDim, 1, 1, 'VALID', 'out')
                    out_[0] = conv2d(out[0], self.nFeat, 1, 1, 'VALID', 'out_')
                    sum_[0] = tf.add_n([drop[0], out_[0], res_3])
                for i in range(1, self.nbStack-1):
                    with tf.name_scope('hourglass.' + str(i+1)):
                        hg[i] = self.hourglass(sum_[i-1], self.nLow, self.nFeat, 'hourglass')
                        ll[i] = convBnrelu(hg[i], self.nFeat, name='conv_1')
                        ll_[i] = conv2d(ll[i], self.nFeat, 1, 1, 'VALID', 'll')
                        drop[i] = tf.layers.dropout(ll_[i], rate=0.1, training=self.train)
                        out[i] = conv2d(ll[i], self.outDim, 1, 1, 'VALID', 'out')
                        out_[i] = conv2d(out[i], self.nFeat, 1, 1, 'VALID', 'out_')
                        sum_[i] = tf.add_n([drop[i], out_[i], sum_[i-1]])
                with tf.name_scope('hourglass.' + str(self.nbStack)):
                    hg[self.nbStack-1] = self.hourglass(sum_[self.nbStack-2], self.nLow, self.nFeat, 'hourglass')
                    ll[self.nbStack-1] = convBnrelu(hg[self.nbStack-1], self.nFeat, name='conv_1')
                    drop[self.nbStack-1] = tf.layers.dropout(ll[self.nbStack-1], rate=0.1, training=self.train)
                    out[self.nbStack-1] = conv2d(drop[self.nbStack-1], self.outDim, 1, 1, 'VALID', 'out')
            return tf.stack(out, name='output')
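Each stack's heatmap is kept in the out table so a loss can be attached to every hourglass (intermediate supervision); tf.stack then packs the nbStack heatmaps along a new leading axis. A toy illustration of that final packing step, with made-up shapes standing in for the project's tensors:

import tensorflow as tf

nb_stack, bs, h, w, out_dim = 4, 2, 64, 64, 16
out = [tf.zeros([bs, h, w, out_dim]) for _ in range(nb_stack)]
stacked = tf.stack(out, name='output')   # shape (nb_stack, bs, h, w, out_dim)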
Project: kaggle    Author: kingmacrobo    | project source | file source
def fcn_net(self, x, train=True):
        conv1 = conv2d(x, [3, 3, 3, 32], 'conv1')
        maxp1 = maxpooling(conv1)

        conv2 = conv2d(maxp1, [3, 3, 32, 32], 'conv2')
        maxp2 = maxpooling(conv2)

        conv3 = conv2d(maxp2, [3, 3, 32, 64], 'conv3')
        maxp3 = maxpooling(conv3)

        conv4 = conv2d(maxp3, [3, 3, 64, 64], 'conv4')
        maxp4 = maxpooling(conv4)

        conv5 = conv2d(maxp4, [3, 3, 64, 128], 'conv5')
        maxp5 = maxpooling(conv5)

        conv6 = conv2d(maxp5, [3, 3, 128, 128], 'conv6')
        maxp6 = maxpooling(conv6)

        conv7 = conv2d(maxp6, [3, 3, 128, 256], 'conv7')
        maxp7 = maxpooling(conv7)

        conv8 = conv2d(maxp7, [3, 3, 256, 256], 'conv8')
        maxp8 = maxpooling(conv8)

        conv9 = conv2d(maxp8, [3, 3, 256, 512], 'conv9')
        maxp9 = maxpooling(conv9)

        drop = tf.nn.dropout(maxp9, self.dropout)

        # 1x1 convolution to input_size*input_size channels; activation='no',
        # so the sigmoid is applied downstream rather than inside this layer
        net = conv2d(drop, [1, 1, 512, self.input_size*self.input_size], 'conv10', activation='no')

        # squeeze the spatial dimensions (axes 1 and 2) during training
        if train:
            net = tf.squeeze(net, [1, 2], name="squeezed")

        return net
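The nine stride-2 max-pool layers each halve the spatial size, so the squeeze of axes 1 and 2 is only valid once they have collapsed to 1x1; that points to a 512x512 input here, though the actual self.input_size is set elsewhere in the project. A quick sanity check under that assumption:

# nine stride-2 poolings: 512 / 2**9 == 1, so axes 1 and 2 are squeezable
assert 512 // 2**9 == 1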
Project: kaggle    Author: kingmacrobo    | project source | file source
def u_net(self, x, layers=4, base_channel=64, train=True):
        ds_layers = {}
        ds_layer_shape = {}

        # down sample layers
        for layer in range(0, layers-1):
            f_channels = base_channel * (2**layer)
            layer_name = 'ds_{}'.format(layer)
            if layer == 0:
                x = conv2d(x, [3, 3, 3, f_channels], layer_name + '_1')
            else:
                x = conv2d(x, [3, 3, f_channels // 2, f_channels], layer_name + '_1')

            x = conv2d(x, [3, 3, f_channels, f_channels], layer_name + '_2')
            ds_layers[layer] = x
            ds_layer_shape[layer] = tf.shape(x)

            x = maxpooling(x)

        # bottom layer
        f_channels = base_channel * (2**(layers-1))
        x = conv2d(x, [3, 3, f_channels // 2, f_channels], 'bottom_1')
        x = conv2d(x, [3, 3, f_channels, f_channels], 'bottom_2')

        # up sample layers
        for layer in range(layers-2, -1, -1):
            f_channels = base_channel * (2**layer)
            layer_name = 'up_{}'.format(layer)
            x = deconv2d(x, [3, 3, f_channels, 2*f_channels], ds_layer_shape[layer], layer_name + '_deconv2d')

            # concatenate the corresponding down-sample layer with the up-sample layer
            x = concat(ds_layers[layer], x)

            x = conv2d(x, [3, 3, 2*f_channels, f_channels], layer_name + '_conv_1')
            x = conv2d(x, [3, 3, f_channels, f_channels], layer_name + '_conv_2')
            #if train:
            #    x = tf.nn.dropout(x, self.dropout)

        # 1x1 convolution to reduce the channels to one
        x = conv2d(x, [1, 1, base_channel, 1], 'conv_1x1', activation='no')

        logits = tf.squeeze(x, axis=3)

        return logits
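The channel bookkeeping is why f_channels // 2 has to stay integer division (in Python 3, / would put a float inside the filter-shape list): each level's in-channels is exactly the previous level's out-channels. A quick check with the default-looking values layers=4, base_channel=64:

layers, base_channel = 4, 64
for layer in range(1, layers):
    f_channels = base_channel * 2**layer
    # the previous level produced base_channel * 2**(layer-1) channels
    assert f_channels // 2 == base_channel * 2**(layer - 1)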
Project: tensorflow-rl    Author: steveKapturowski    | project source | file source
def _build_encoder(self):
        with tf.variable_scope(self.name):
            if self.arch == 'FC':
                layer_i = layers.flatten(self.input_ph)
                for i, layer_size in enumerate(self.fc_layer_sizes):
                    layer_i = layers.fc('fc{}'.format(i+1), layer_i, layer_size, activation=self.activation)[-1]
                self.ox = layer_i
            elif self.arch == 'ATARI-TRPO':
                self.w1, self.b1, self.o1 = layers.conv2d('conv1', self.input_ph, 16, 4, self.input_channels, 2, activation=self.activation)
                self.w2, self.b2, self.o2 = layers.conv2d('conv2', self.o1, 16, 4, 16, 2, activation=self.activation)
                self.w3, self.b3, self.o3 = layers.fc('fc3', layers.flatten(self.o2), 20, activation=self.activation)
                self.ox = self.o3
            elif self.arch == 'NIPS':
                self.w1, self.b1, self.o1 = layers.conv2d('conv1', self.input_ph, 16, 8, self.input_channels, 4, activation=self.activation)
                self.w2, self.b2, self.o2 = layers.conv2d('conv2', self.o1, 32, 4, 16, 2, activation=self.activation)
                self.w3, self.b3, self.o3 = layers.fc('fc3', layers.flatten(self.o2), 256, activation=self.activation)
                self.ox = self.o3
            elif self.arch == 'NATURE':
                self.w1, self.b1, self.o1 = layers.conv2d('conv1', self.input_ph, 32, 8, self.input_channels, 4, activation=self.activation)
                self.w2, self.b2, self.o2 = layers.conv2d('conv2', self.o1, 64, 4, 32, 2, activation=self.activation)
                self.w3, self.b3, self.o3 = layers.conv2d('conv3', self.o2, 64, 3, 64, 1, activation=self.activation)
                self.w4, self.b4, self.o4 = layers.fc('fc4', layers.flatten(self.o3), 512, activation=self.activation)
                self.ox = self.o4
            else:
                raise Exception('Invalid architecture `{}`'.format(self.arch))

            if self.use_recurrent:
                with tf.variable_scope('lstm_layer') as vs:
                    self.lstm_cell = tf.contrib.rnn.BasicLSTMCell(
                        self.hidden_state_size, state_is_tuple=True, forget_bias=1.0)

                    batch_size = tf.shape(self.step_size)[0]
                    self.ox_reshaped = tf.reshape(self.ox,
                        [batch_size, -1, self.ox.get_shape().as_list()[-1]])
                    state_tuple = tf.contrib.rnn.LSTMStateTuple(
                        *tf.split(self.initial_lstm_state, 2, 1))

                    self.lstm_outputs, self.lstm_state = tf.nn.dynamic_rnn(
                        self.lstm_cell,
                        self.ox_reshaped,
                        initial_state=state_tuple,
                        sequence_length=self.step_size,
                        time_major=False)

                    self.lstm_state = tf.concat(self.lstm_state, 1)
                    self.ox = tf.reshape(self.lstm_outputs, [-1, self.hidden_state_size], name='reshaped_lstm_outputs')

                    # Get all LSTM trainable params
                    self.lstm_trainable_variables = [
                        v for v in tf.trainable_variables() if v.name.startswith(vs.name)]

            return self.ox
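From the NIPS and NATURE branches, this custom layers.conv2d appears to take (name, input, out_channels, kernel_size, in_channels, stride). Assuming 'VALID'-style convolutions as in the original DQN paper and the standard 84x84 Atari input, the NATURE branch gives the familiar DQN feature sizes (a check independent of the project's code):

def valid_out(size, kernel, stride):
    return (size - kernel) // stride + 1

size = 84
for kernel, stride in [(8, 4), (4, 2), (3, 1)]:   # conv1, conv2, conv3
    size = valid_out(size, kernel, stride)         # 84 -> 20 -> 9 -> 7
assert size == 7                                   # 7*7*64 features into fc4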