Python lasagne.layers module: Upscale2DLayer() example source code

We have extracted the following 14 code examples from open-source Python projects to illustrate how to use lasagne.layers.Upscale2DLayer().
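
For orientation, here is a minimal standalone sketch (not taken from the projects below) of the layer's basic contract: with the default mode='repeat', Upscale2DLayer multiplies both spatial dimensions of a 4D tensor by scale_factor.

import lasagne.layers as L

net = L.InputLayer((None, 16, 8, 8))
net = L.Upscale2DLayer(net, scale_factor=2)  # mode='repeat' by default
print(L.get_output_shape(net))               # (None, 16, 16, 16)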

Project: nn-patterns    Author: pikinder
def _invert_GlobalPoolLayer(self, layer, feeder):
        assert isinstance(layer, L.GlobalPoolLayer)
        assert layer.pool_function == T.mean
        assert len(L.get_output_shape(layer.input_layer)) == 4

        target_shape = L.get_output_shape(feeder)+(1,1)
        if target_shape[0] is None:
            target_shape = (-1,) + target_shape[1:]

        feeder = L.ReshapeLayer(feeder, target_shape)

        upscaling = L.get_output_shape(layer.input_layer)[2:]
        feeder = L.Upscale2DLayer(feeder, upscaling)

        def expression(x):
            return x / np.prod(upscaling).astype(theano.config.floatX)
        feeder = L.ExpressionLayer(feeder, expression)
        return feeder
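
The snippet above inverts a global mean pool: it repeats the pooled value over the original spatial extent, then divides by the pool area so each position receives its share of the mean. A small numpy sketch of that arithmetic, assuming a hypothetical 2x2 pooled map:

import numpy as np

pooled = np.array([[6.0]])                                   # mean over a hypothetical 2x2 map
spread = np.repeat(np.repeat(pooled, 2, axis=0), 2, axis=1)  # what Upscale2DLayer('repeat') does
print(spread / (2 * 2))                                      # each cell receives mean / pool area
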
Project: EAC-Net    Author: wiibrew
def build_model():
    net = {}
    net['input'] = InputLayer((None, 512*20, 3, 3))

    au_fc_layers=[]
    for i in range(20):
        net['roi_AU_N_'+str(i)]=SliceLayer(net['input'],indices=slice(i*512,(i+1)*512),axis=1)

        # try adding upsampling here for more conv layers

        net['Roi_upsample_'+str(i)]=Upscale2DLayer(net['roi_AU_N_'+str(i)],scale_factor=2)

        net['conv_roi_'+str(i)]=ConvLayer(net['Roi_upsample_'+str(i)],512,3)

        net['au_fc_'+str(i)]=DenseLayer(net['conv_roi_'+str(i)],num_units=150)

        au_fc_layers+=[net['au_fc_'+str(i)]]

    net['local_fc']=concat(au_fc_layers)
    net['local_fc2']=DenseLayer(net['local_fc'],num_units=2048)

    net['local_fc_dp']=DropoutLayer(net['local_fc2'],p=0.5)


    # net['fc_comb']=concat([net['au_fc_layer'],net['local_fc_dp']])


    # net['fc_dense']=DenseLayer(net['fc_comb'],num_units=1024)

    # net['fc_dense_dp']=DropoutLayer(net['fc_dense'],p=0.3)

    net['real_out']=DenseLayer(net['local_fc_dp'],num_units=12,nonlinearity=sigmoid)


    # net['final']=concat([net['pred_pos_layer'],net['output_layer']])

    return net
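
A standalone shape walk-through for one ROI branch above, using the snippet's (None, 512*20, 3, 3) input; the 'valid' 3x3 convolution then shrinks the upscaled 6x6 maps back to 4x4:

import lasagne.layers as L

roi = L.InputLayer((None, 512 * 20, 3, 3))
roi = L.SliceLayer(roi, indices=slice(0, 512), axis=1)  # (None, 512, 3, 3)
roi = L.Upscale2DLayer(roi, scale_factor=2)             # (None, 512, 6, 6)
roi = L.Conv2DLayer(roi, 512, 3)                        # (None, 512, 4, 4)
print(L.get_output_shape(roi))
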
Project: experiments    Author: tencia
def build_nets(input_var, channels=1, do_batchnorm=True, z_dim=100):

    def ns(shape):
        # Replace the batch dimension with [0], which ReshapeLayer
        # reads as "same size as input dimension 0".
        ret = list(shape)
        ret[0] = [0]
        return tuple(ret)

    ret = {}
    bn = batch_norm if do_batchnorm else (lambda x: x)
    ret['ae_in'] = layer = InputLayer(shape=(None,channels,28,28), input_var=input_var)
    ret['ae_conv1'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=5))
    ret['ae_pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['ae_conv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['ae_pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['ae_enc'] = layer = DenseLayer(layer, num_units=z_dim,
            nonlinearity=nn.nonlinearities.tanh)
    ret['ae_unenc'] = layer = bn(nn.layers.DenseLayer(layer,
        num_units = np.product(nn.layers.get_output_shape(ret['ae_pool2'])[1:])))
    ret['ae_resh'] = layer = ReshapeLayer(layer,
            shape=ns(nn.layers.get_output_shape(ret['ae_pool2'])))
    ret['ae_depool2'] = layer = Upscale2DLayer(layer, scale_factor=2)
    ret['ae_deconv2'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=3,
        pad='full'))
    ret['ae_depool1'] = layer = Upscale2DLayer(layer, scale_factor=2)
    ret['ae_out'] = Conv2DLayer(layer, num_filters=1, filter_size=5, pad='full',
            nonlinearity=nn.nonlinearities.sigmoid)

    ret['disc_in'] = layer = InputLayer(shape=(None,channels,28,28), input_var=input_var)
    ret['disc_conv1'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=5))
    ret['disc_pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['disc_conv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['disc_pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['disc_hid'] = layer = bn(DenseLayer(layer, num_units=100))
    ret['disc_out'] = DenseLayer(layer, num_units=1, nonlinearity=nn.nonlinearities.sigmoid)

    return ret
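
The 'ae_depool' layers above use Upscale2DLayer's default mode='repeat', which duplicates each value into a scale_factor x scale_factor block. A numpy equivalent of that operation:

import numpy as np

x = np.array([[1, 2],
              [3, 4]])
print(np.repeat(np.repeat(x, 2, axis=0), 2, axis=1))
# [[1 1 2 2]
#  [1 1 2 2]
#  [3 3 4 4]
#  [3 3 4 4]]
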
Project: diagnose-heart    Author: woshialex
def build_fcn_segmenter(input_var, shape, version=2):
    ret = {}

    if version == 2:
        ret['input'] = la = InputLayer(shape, input_var)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=7,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3,
            pad='full'))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['output'] = la = Conv2DLayer(la, num_filters=1, filter_size=7,
                pad='full', nonlinearity=nn.nonlinearities.sigmoid)

    return ret, nn.layers.get_output(ret['output']), \
            nn.layers.get_output(ret['output'], deterministic=True)
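
The decoder above pairs each Upscale2DLayer with pad='full' convolutions: a 'valid' convolution shrinks each spatial dimension by filter_size - 1, and a 'full' convolution grows it by the same amount, so the output can return to the input resolution. A standalone shape sketch with a hypothetical 16x16 input:

import lasagne.layers as L

la = L.InputLayer((None, 1, 16, 16))
la = L.Conv2DLayer(la, num_filters=8, filter_size=7)              # 'valid': (None, 8, 10, 10)
la = L.Conv2DLayer(la, num_filters=8, filter_size=7, pad='full')  # 'full':  (None, 8, 16, 16)
print(L.get_output_shape(la))
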
Project: ip-avsr    Author: lzuwei
def create_model(input_var, input_shape, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()

    input = InputLayer(shape=input_shape, input_var=input_var, name='input')
    conv2d1 = Conv2DLayer(input, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d2 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d2')
    conv2d3 = Conv2DLayer(maxpool2d2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
    maxpool2d4 = MaxPool2DLayer(conv2d3, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
    conv2d5 = Conv2DLayer(maxpool2d4, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # flatten to (batch, 3000)
    reshape6_output = reshape6.output_shape[1]
    dense7 = DenseLayer(reshape6, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
    bottleneck = DenseLayer(dense7, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
    dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # (batch, 200, 3, 5)
    deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
                               W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
                               W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    print_network(reshape16)
    return reshape16
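
Note how the decoder ties its dense weights to the encoder (W=bottleneck.W.T, W=dense7.W.T) instead of learning separate parameters. A minimal sketch of that weight-tying pattern with hypothetical sizes:

import lasagne.layers as L

inp = L.InputLayer((None, 3000))
enc = L.DenseLayer(inp, num_units=500)              # W has shape (3000, 500)
dec = L.DenseLayer(enc, num_units=3000, W=enc.W.T)  # reuses the transposed encoder weights
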
Project: ip-avsr    Author: lzuwei
def create_model(incoming, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()

    conv2d1 = Conv2DLayer(incoming, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d3 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d3')
    bn2 = BatchNormLayer(maxpool2d3, name='batchnorm2')
    conv2d4 = Conv2DLayer(bn2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d4', nonlinearity=scaled_tanh)
    maxpool2d6 = MaxPool2DLayer(conv2d4, pool_size=pool_size, name='maxpool2d6', pad=(1,0))
    bn3 = BatchNormLayer(maxpool2d6, name='batchnorm3')
    conv2d7 = Conv2DLayer(bn3, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d7', nonlinearity=scaled_tanh)
    reshape9 = ReshapeLayer(conv2d7, shape=([0], -1), name='reshape9')  # flatten to (batch, 3000)
    reshape9_output = reshape9.output_shape[1]
    bn8 = BatchNormLayer(reshape9, name='batchnorm8')
    dense10 = DenseLayer(bn8, num_units=dense_mid_size, name='dense10', nonlinearity=scaled_tanh)
    bn11 = BatchNormLayer(dense10, name='batchnorm11')
    bottleneck = DenseLayer(bn11, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense12 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense12', nonlinearity=linear)
    dense13 = DenseLayer(dense12, num_units=reshape9_output, W=dense10.W.T, nonlinearity=scaled_tanh, name='dense13')
    reshape14 = ReshapeLayer(dense13, shape=([0], conv_num_filters3, 3, 5), name='reshape14')  # (batch, 200, 3, 5)
    deconv2d15 = Deconv2DLayer(reshape14, conv2d7.input_shape[1], conv2d7.filter_size, stride=conv2d7.stride,
                               W=conv2d7.W, flip_filters=not conv2d7.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    upscale2d16 = Upscale2DLayer(deconv2d15, scale_factor=pool_size, name='upscale2d16')
    deconv2d17 = Deconv2DLayer(upscale2d16, conv2d4.input_shape[1], conv2d4.filter_size, stride=conv2d4.stride,
                               W=conv2d4.W, flip_filters=not conv2d4.flip_filters, name='deconv2d17', nonlinearity=scaled_tanh)
    upscale2d18 = Upscale2DLayer(deconv2d17, scale_factor=pool_size, name='upscale2d18')
    deconv2d19 = Deconv2DLayer(upscale2d18, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d19', nonlinearity=scaled_tanh)
    reshape20 = ReshapeLayer(deconv2d19, ([0], -1), name='reshape20')
    return reshape20, bottleneck
Project: ip-avsr    Author: lzuwei
def create_model(incoming, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()

    conv2d1 = Conv2DLayer(incoming, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d2 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d2')
    conv2d3 = Conv2DLayer(maxpool2d2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
    maxpool2d4 = MaxPool2DLayer(conv2d3, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
    conv2d5 = Conv2DLayer(maxpool2d4, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # flatten to (batch, 3000)
    reshape6_output = reshape6.output_shape[1]
    dense7 = DenseLayer(reshape6, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
    bottleneck = DenseLayer(dense7, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
    dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # (batch, 200, 3, 5)
    deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
                               W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
                               W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    return reshape16, bottleneck
Project: nn-patterns    Author: pikinder
def _invert_Conv2DLayer(self, layer, feeder):
        # Warning: they are swapped here.
        feeder = self._put_rectifiers(feeder, layer)
        feeder = self._get_normalised_relevance_layer(layer, feeder)

        f_s = layer.filter_size
        if layer.pad == 'same':
            pad = 'same'
        elif layer.pad == 'valid' or layer.pad == (0, 0):
            pad = 'full'
        else:
            raise RuntimeError("Define your padding as full or same.")

        # By definition, flip_filters must be enabled
        # for this to be a proper deconvolution.
        num_filters = L.get_output_shape(layer.input_layer)[1]
        if layer.stride == (4,4):
            # Todo: similar code gradient based explainers. Merge.
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1,
                                         pad=pad,
                                         nonlinearity=None,
                                         b=None,
                                         flip_filters=True)
            conv_layer = output_layer
            tmp = L.SliceLayer(output_layer, slice(0, -3), axis=3)
            output_layer = L.SliceLayer(tmp, slice(0, -3), axis=2)
            output_layer.W = conv_layer.W
        else:
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1,
                                         pad=pad,
                                         nonlinearity=None,
                                         b=None,
                                         flip_filters=True)
        W = output_layer.W

        # Do the multiplication.
        x_layer = L.ReshapeLayer(layer.input_layer,
                                 (-1,)+L.get_output_shape(output_layer)[1:])
        output_layer = L.ElemwiseMergeLayer(incomings=[x_layer, output_layer],
                                            merge_function=T.mul)
        output_layer.W = W
        return output_layer
Project: nn-patterns    Author: pikinder
def _invert_Conv2DLayer(self, layer, feeder):
        def _check_padding_same():
            for s, p in zip(layer.filter_size, layer.pad):
                if s % 2 != 1:
                    return False
                elif s//2 != p:
                    return False
            return True

        # Warning: they are swapped here.
        feeder = self._put_rectifiers(feeder, layer)

        f_s = layer.filter_size
        if layer.pad == 'same' or _check_padding_same():
            pad = 'same'
        elif layer.pad == 'valid' or layer.pad == (0, 0):
            pad = 'full'
        else:
            raise RuntimeError("Define your padding as full or same.")

        # By definition, flip_filters must be enabled
        # for this to be a proper deconvolution.

        num_filters = L.get_output_shape(layer.input_layer)[1]
        if layer.stride == (4,4):
            # Todo: clean this!
            print("Applying alexnet hack.")
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
            print("Applying alexnet hack part 2.")
            conv_layer = output_layer
            output_layer = L.SliceLayer(L.SliceLayer(output_layer,
                                                     slice(0,-3), axis=3),
                                        slice(0,-3), axis=2)
            output_layer.W = conv_layer.W
        elif layer.stride == (2,2):
            # Todo: clean this! Seems to be the same code as for AlexNet above.
            print("Applying GoogLeNet hack.")
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
        else:
            # Todo: clean this. Repetitions all over.
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
        return output_layer
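
Both _invert_Conv2DLayer variants above undo a strided convolution by upscaling with mode='dilate', which inserts zeros between the input values instead of repeating them, and then convolving with stride 1. A numpy sketch of what 'dilate' produces for scale factor 2:

import numpy as np

x = np.array([[1, 2],
              [3, 4]])
out = np.zeros((4, 4), dtype=x.dtype)
out[::2, ::2] = x          # each value lands in the top-left corner of its block
print(out)
# [[1 0 2 0]
#  [0 0 0 0]
#  [3 0 4 0]
#  [0 0 0 0]]
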
Project: began    Author: davidtellez
def network_discriminator(self, input, network_weights=None):

        layers = []

        if isinstance(input, lasagne.layers.Layer):
            layers.append(input)

            # First convolution
            layers.append(conv_layer(input, n_filters=self.n_filters, stride=1, name='discriminator/encoder/conv%d' % len(layers), network_weights=network_weights))

        else:
            # Input layer
            layers.append(InputLayer(shape=(None, 3, self.input_size, self.input_size), input_var=input, name='discriminator/encoder/input'))

            # First convolution
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters, stride=1, name='discriminator/encoder/conv%d' % len(layers), network_weights=network_weights))

        # Convolutional blocks (encoder)
        n_blocks = int(np.log2(self.input_size/8)) + 1  # end up with 8x8 output
        for i_block in range(1, n_blocks+1):
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters*i_block, stride=1, name='discriminator/encoder/conv%d' % len(layers), network_weights=network_weights))
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters*i_block, stride=1, name='discriminator/encoder/conv%d' % len(layers), network_weights=network_weights))
            if i_block != n_blocks:
                # layers.append(conv_layer(layers[-1], n_filters=self.n_filters*(i_block+1), stride=2, name='discriminator/encoder/conv%d' % len(layers), network_weights=network_weights))
                layers.append(MaxPool2DLayer(layers[-1], pool_size=2, stride=2, name='discriminator/encoder/pooling%d' % len(layers)))
            # else:
            #     layers.append(conv_layer(layers[-1], n_filters=self.n_filters*(i_block), stride=1, name='discriminator/encoder/conv%d' % len(layers), network_weights=network_weights))

        # Dense layers (linear outputs)
        layers.append(dense_layer(layers[-1], n_units=self.hidden_size, name='discriminator/encoder/dense%d' % len(layers), network_weights=network_weights))

        # Dense layer up (from h to n*8*8)
        layers.append(dense_layer(layers[-1], n_units=(8 * 8 * self.n_filters), name='discriminator/decoder/dense%d' % len(layers), network_weights=network_weights))
        layers.append(ReshapeLayer(layers[-1], (-1, self.n_filters, 8, 8), name='discriminator/decoder/reshape%d' % len(layers)))

        # Convolutional blocks (decoder)
        for i_block in range(1, n_blocks+1):
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters, stride=1, name='discriminator/decoder/conv%d' % len(layers), network_weights=network_weights))
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters, stride=1, name='discriminator/decoder/conv%d' % len(layers), network_weights=network_weights))
            if i_block != n_blocks:
                layers.append(Upscale2DLayer(layers[-1], scale_factor=2, name='discriminator/decoder/upsample%d' % len(layers)))

        # Final layer (sigmoid keeps the output images in the range [0, 1])
        layers.append(conv_layer(layers[-1], n_filters=3, stride=1, name='discriminator/decoder/output', network_weights=network_weights, nonlinearity=sigmoid))

        # Network in dictionary form
        network = {layer.name: layer for layer in layers}

        return network
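
The n_blocks arithmetic above fixes how many x2 upscalings grow the 8x8 bottleneck back to the image resolution; for a hypothetical input_size of 64:

import numpy as np

input_size = 64                              # hypothetical; set by the model configuration
n_blocks = int(np.log2(input_size / 8)) + 1  # 4
# One upscaling after every block except the last: 8 -> 16 -> 32 -> 64.
print([8 * 2 ** i for i in range(n_blocks)])
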
Project: began    Author: davidtellez
def network_generator(self, input_var, network_weights=None):

        # Input layer
        layers = []
        n_blocks = int(np.log2(self.input_size / 8)) + 1  # end up with 8x8 output
        layers.append(InputLayer(shape=(None, self.hidden_size), input_var=input_var, name='generator/input'))

        # Dense layer up (from h to n*8*8)
        layers.append(dense_layer(layers[-1], n_units=(8 * 8 * self.n_filters), name='generator/dense%d' % len(layers), network_weights=network_weights))
        layers.append(ReshapeLayer(layers[-1], (-1, self.n_filters, 8, 8), name='generator/reshape%d' % len(layers)))

        # Convolutional blocks (decoder)
        for i_block in range(1, n_blocks+1):
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters, stride=1, name='generator/conv%d' % len(layers), network_weights=network_weights))
            layers.append(conv_layer(layers[-1], n_filters=self.n_filters, stride=1, name='generator/conv%d' % len(layers), network_weights=network_weights))
            if i_block != n_blocks:
                layers.append(Upscale2DLayer(layers[-1], scale_factor=2, name='generator/upsample%d' % len(layers)))

        # Final layer (sigmoid keeps the output images in [0, 1]; tanh would give [-1, 1])
        layers.append(conv_layer(layers[-1], n_filters=3, stride=1, name='generator/output', network_weights=network_weights, nonlinearity=sigmoid))

        # Network in dictionary form
        network = {layer.name: layer for layer in layers}

        return network

    # def network_generator_alt(self, input_var, network_weights=None):
    #
    #     # Input layer
    #     layers = []
    #     n_blocks = int(np.log2(self.input_size / 8)) + 1  # end up with 8x8 output
    #     layers.append(InputLayer(shape=(None, self.hidden_size), input_var=input_var, name='generator/input'))
    #
    #     # Dense layer up (from h to n*8*8)
    #     layers.append(dense_layer(layers[-1], n_units=(8 * 8 * self.n_filters*n_blocks), name='generator/dense%d' % len(layers), network_weights=network_weights, nonlinearity=elu, bn=True))
    #     layers.append(ReshapeLayer(layers[-1], (-1, self.n_filters*n_blocks, 8, 8), name='generator/reshape%d' % len(layers)))
    #
    #     # Convolutional blocks (decoder)
    #     for i_block in range(1, n_blocks+1)[::-1]:
    #         # layers.append(conv_layer(layers[-1], n_filters=self.n_filters*(i_block), stride=1, name='generator/conv%d' % len(layers), network_weights=network_weights, bn=True))
    #         # layers.append(conv_layer(layers[-1], n_filters=self.n_filters*(i_block), stride=1, name='generator/conv%d' % len(layers), network_weights=network_weights, bn=True))
    #         if i_block != 1:
    #             layers.append(transposed_conv_layer(layers[-1], n_filters=self.n_filters*(i_block-1), stride=2, name='generator/upsample%d' % len(layers),
    #                                                 output_size=8*2**(n_blocks-i_block+1), network_weights=network_weights, nonlinearity=elu, bn=True))
    #
    #     # Final layer (make sure input images are in the range [-1, 1]
    #     layers.append(conv_layer(layers[-1], n_filters=3, stride=1, name='generator/output', network_weights=network_weights, nonlinearity=tanh, bn=False))
    #
    #     # Network in dictionary form
    #     network = {layer.name: layer for layer in layers}
    #
    #     return network
Project: saliency-salgan-2017    Author: imatge-upc
def build_decoder(net):
    net['uconv5_3'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    print("uconv5_3: {}".format(net['uconv5_3'].output_shape[1:]))

    net['uconv5_2'] = ConvLayer(net['uconv5_3'], 512, 3, pad=1)
    print("uconv5_2: {}".format(net['uconv5_2'].output_shape[1:]))

    net['uconv5_1'] = ConvLayer(net['uconv5_2'], 512, 3, pad=1)
    print("uconv5_1: {}".format(net['uconv5_1'].output_shape[1:]))

    net['upool4'] = Upscale2DLayer(net['uconv5_1'], scale_factor=2)
    print("upool4: {}".format(net['upool4'].output_shape[1:]))

    net['uconv4_3'] = ConvLayer(net['upool4'], 512, 3, pad=1)
    print("uconv4_3: {}".format(net['uconv4_3'].output_shape[1:]))

    net['uconv4_2'] = ConvLayer(net['uconv4_3'], 512, 3, pad=1)
    print("uconv4_2: {}".format(net['uconv4_2'].output_shape[1:]))

    net['uconv4_1'] = ConvLayer(net['uconv4_2'], 512, 3, pad=1)
    print("uconv4_1: {}".format(net['uconv4_1'].output_shape[1:]))

    net['upool3'] = Upscale2DLayer(net['uconv4_1'], scale_factor=2)
    print("upool3: {}".format(net['upool3'].output_shape[1:]))

    net['uconv3_3'] = ConvLayer(net['upool3'], 256, 3, pad=1)
    print("uconv3_3: {}".format(net['uconv3_3'].output_shape[1:]))

    net['uconv3_2'] = ConvLayer(net['uconv3_3'], 256, 3, pad=1)
    print("uconv3_2: {}".format(net['uconv3_2'].output_shape[1:]))

    net['uconv3_1'] = ConvLayer(net['uconv3_2'], 256, 3, pad=1)
    print("uconv3_1: {}".format(net['uconv3_1'].output_shape[1:]))

    net['upool2'] = Upscale2DLayer(net['uconv3_1'], scale_factor=2)
    print("upool2: {}".format(net['upool2'].output_shape[1:]))

    net['uconv2_2'] = ConvLayer(net['upool2'], 128, 3, pad=1)
    print("uconv2_2: {}".format(net['uconv2_2'].output_shape[1:]))

    net['uconv2_1'] = ConvLayer(net['uconv2_2'], 128, 3, pad=1)
    print("uconv2_1: {}".format(net['uconv2_1'].output_shape[1:]))

    net['upool1'] = Upscale2DLayer(net['uconv2_1'], scale_factor=2)
    print("upool1: {}".format(net['upool1'].output_shape[1:]))

    net['uconv1_2'] = ConvLayer(net['upool1'], 64, 3, pad=1)
    print("uconv1_2: {}".format(net['uconv1_2'].output_shape[1:]))

    net['uconv1_1'] = ConvLayer(net['uconv1_2'], 64, 3, pad=1)
    print("uconv1_1: {}".format(net['uconv1_1'].output_shape[1:]))

    net['output'] = ConvLayer(net['uconv1_1'], 1, 1, pad=0, nonlinearity=sigmoid)
    print("output: {}".format(net['output'].output_shape[1:]))

    return net
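
Every 3x3 convolution in this decoder uses pad=1, which preserves spatial size, so only the Upscale2DLayer calls change resolution. A standalone check, assuming a 224x224 VGG-16 input for which conv5_3 is 14x14:

import lasagne.layers as L

net = L.InputLayer((None, 512, 14, 14))
net = L.Conv2DLayer(net, 512, 3, pad=1)      # pad=1 keeps 14x14
net = L.Upscale2DLayer(net, scale_factor=2)  # doubles to 28x28
print(L.get_output_shape(net))               # (None, 512, 28, 28)
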
Project: ip-avsr    Author: lzuwei
def create_model(incoming, options):
    input_p = 0.2
    hidden_p = 0.5
    # Inflate the filter counts by 1 / (1 - p) to compensate for dropout.
    conv_num_filters1 = int(100 / (1.0 - input_p))
    conv_num_filters2 = int(150 / (1.0 - hidden_p))
    conv_num_filters3 = int(200 / (1.0 - hidden_p))
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = int(options['BOTTLENECK'] / 0.5)
    dense_mid_size = int(options['DENSE'] / 0.5)
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()
    dropout0 = DropoutLayer(incoming, p=input_p, name='dropout0')
    conv2d1 = Conv2DLayer(dropout0, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d2 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d2')
    dropout1 = DropoutLayer(maxpool2d2, name='dropout1')
    conv2d3 = Conv2DLayer(dropout1, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
    maxpool2d4 = MaxPool2DLayer(conv2d3, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
    dropout2 = DropoutLayer(maxpool2d4, name='dropout2')
    conv2d5 = Conv2DLayer(dropout2, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # flatten to (batch, 3000)
    reshape6_output = reshape6.output_shape[1]
    dropout3 = DropoutLayer(reshape6, name='dropout3')
    dense7 = DenseLayer(dropout3, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
    dropout4 = DropoutLayer(dense7, name='dropout4')
    bottleneck = DenseLayer(dropout4, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
    dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # (batch, 200, 3, 5)
    deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
                               W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
                               W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    return reshape16, bottleneck
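
The inflated filter counts above keep the expected number of active units under dropout equal to the baseline architecture (100/150/200 filters). The arithmetic, as a quick check:

input_p, hidden_p = 0.2, 0.5
print(int(100 / (1.0 - input_p)))   # 125 filters instead of 100
print(int(150 / (1.0 - hidden_p)))  # 300 instead of 150
print(int(200 / (1.0 - hidden_p)))  # 400 instead of 200
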
Project: ip-avsr    Author: lzuwei
def create_model(incoming, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()
    dropout0 = DropoutLayer(incoming, p=0.2, name='dropout0')
    conv2d1 = Conv2DLayer(dropout0, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    bn1 = BatchNormLayer(conv2d1, name='batchnorm1')
    maxpool2d2 = MaxPool2DLayer(bn1, pool_size=pool_size, name='maxpool2d2')
    dropout1 = DropoutLayer(maxpool2d2, name='dropout1')
    conv2d3 = Conv2DLayer(dropout1, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
    bn2 = BatchNormLayer(conv2d3, name='batchnorm2')
    maxpool2d4 = MaxPool2DLayer(bn2, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
    dropout2 = DropoutLayer(maxpool2d4, name='dropout2')
    conv2d5 = Conv2DLayer(dropout2, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    bn3 = BatchNormLayer(conv2d5, name='batchnorm3')
    reshape6 = ReshapeLayer(bn3, shape=([0], -1), name='reshape6')  # flatten to (batch, 3000)
    reshape6_output = reshape6.output_shape[1]
    dropout3 = DropoutLayer(reshape6, name='dropout3')
    dense7 = DenseLayer(dropout3, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
    bn4 = BatchNormLayer(dense7, name='batchnorm4')
    dropout4 = DropoutLayer(bn4, name='dropout4')
    bottleneck = DenseLayer(dropout4, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
    dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # (batch, 200, 3, 5)
    deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
                               W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
                               W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    return reshape16, bottleneck