Python lasagne module: layers() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use lasagne.layers().
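Before the excerpts, here is a minimal, self-contained sketch of the typical lasagne.layers workflow; the layer sizes and variable names are arbitrary, chosen only for illustration:

import theano.tensor as T
import lasagne

# Declare a symbolic input and stack layers on top of it.
input_var = T.matrix('inputs')
l_in = lasagne.layers.InputLayer(shape=(None, 784), input_var=input_var)
l_hid = lasagne.layers.DenseLayer(l_in, num_units=256,
                                  nonlinearity=lasagne.nonlinearities.rectify)
l_out = lasagne.layers.DenseLayer(l_hid, num_units=10,
                                  nonlinearity=lasagne.nonlinearities.softmax)

# Build the symbolic output expression and collect the trainable parameters;
# most of the excerpts below are variations on these two calls.
prediction = lasagne.layers.get_output(l_out)
params = lasagne.layers.get_all_params(l_out, trainable=True)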

Project: neural-doodle | Author: alexjc
def prepare_style(self, scale=1.0):
        """Called each phase of the optimization, process the style image according to the scale, then run it
        through the model to extract intermediate outputs (e.g. sem4_1) and turn them into patches.
        """
        style_img = self.rescale_image(self.style_img_original, scale)
        self.style_img = self.model.prepare_image(style_img)

        style_map = self.rescale_image(self.style_map_original, scale)
        self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32)

        # Compile a function to run on the GPU to extract patches for all layers at once.
        layer_outputs = zip(self.style_layers, self.model.get_outputs('sem', self.style_layers))
        extractor = self.compile([self.model.tensor_img, self.model.tensor_map], self.do_extract_patches(layer_outputs))
        result = extractor(self.style_img, self.style_map)

        # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. 
        self.style_data = {}
        for layer, *data in zip(self.style_layers, result[0::3], result[1::3], result[2::3]):
            patches = data[0]
            l = self.model.network['nn'+layer]
            l.num_filters = patches.shape[0] // args.slices
            self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\
                                   + [np.zeros((patches.shape[0],), dtype=np.float16)]
            print('  - Style layer {}: {} patches in {:,}kb.'.format(layer, patches.shape, patches.size//1000))
Project: iterative_inference_segm | Author: adri-romsor
def freezeParameters(net, single=True):
    """
    Freeze parameters of a layer or a network so that they are not trainable
    anymore

    Parameters
    ----------
    net: a network layer
    single: whether to freeze a single layer or all of the layers below as well
    """
    all_layers = lasagne.layers.get_all_layers(net)

    if single:
        all_layers = [all_layers[-1]]

    for layer in all_layers:
        layer_params = layer.get_params()
        for p in layer_params:
            try:
                layer.params[p].remove('trainable')
            except KeyError:
                pass
Project: iterative_inference_segm | Author: adri-romsor
def unfreezeParameters(net, single=True):
    """
    Unfreeze parameters of a layer or a network so that they become trainable
    again

    Parameters
    ----------
    net: a network layer
    single: whether to unfreeze a single layer or all of the layers below as well
    """
    all_layers = lasagne.layers.get_all_layers(net)

    if single:
        all_layers = [all_layers[-1]]

    for layer in all_layers:
        layer_params = layer.get_params()
        for p in layer_params:
            try:
                layer.params[p].add('trainable')
            except KeyError:
                pass
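A brief usage sketch for the two helpers above, assuming net is the top layer of a Lasagne network built elsewhere: freeze the whole stack, then re-enable training for the topmost layer only. Since lasagne.layers.get_all_params(net, trainable=True) returns only parameters still tagged 'trainable', the frozen layers drop out of any update rule built from it.

freezeParameters(net, single=False)    # freeze every layer in the stack
unfreezeParameters(net, single=True)   # make only the top layer trainable again
params = lasagne.layers.get_all_params(net, trainable=True)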
Project: iterative_inference_segm | Author: adri-romsor
def __init__(self, l_in, bgr_mean=np.array([103.939, 116.779, 123.68]),
                 data_format='bc01', **kwargs):
        """A Layer to normalize and convert images from RGB to BGR
        This layer converts images from RGB to BGR to adapt to Caffe
        that uses OpenCV, which uses BGR. It also subtracts the
        per-pixel mean. From:
        https://github.com/fvisin/reseg/blob/variable_size_images/vgg16.py
        Parameters
        ----------
        l_in : :class:``lasagne.layers.Layer``
            The incoming layer, typically an
            :class:``lasagne.layers.InputLayer``
        bgr_mean : iterable of 3 ints
            The mean of each channel. By default, the ImageNet
            mean values are used.
        data_format : str
            The format of l_in, either `b01c` (batch, rows, cols,
            channels) or `bc01` (batch, channels, rows, cols)
        """
        super(RGBtoBGRLayer, self).__init__(l_in, **kwargs)
        assert data_format in ['bc01', 'b01c']
        self.l_in = l_in
        floatX = theano.config.floatX
        self.bgr_mean = bgr_mean.astype(floatX)
        self.data_format = data_format
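The excerpt above stops at the constructor. A minimal sketch of the matching forward pass, assuming the default bc01 layout (for b01c, the flip and the mean subtraction move to the last axis); this is illustrative, not necessarily the project's exact implementation:

    def get_output_for(self, input, **kwargs):
        if self.data_format == 'bc01':
            # Flip RGB -> BGR along the channel axis, then subtract the BGR mean.
            return input[:, ::-1, :, :] - self.bgr_mean[None, :, None, None]
        else:  # 'b01c'
            return input[:, :, :, ::-1] - self.bgr_mean[None, None, None, :]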
Project: Neural-Photo-Editor | Author: ajbrock
def get_output_for(self, input, deterministic=False, **kwargs):
        def _phase_shift(input, r):
            bsize, c, a, b = input.shape[0], 1, self.output_shape[2] // r, self.output_shape[3] // r
            X = T.reshape(input, (bsize, r, r, a, b))
            X = T.transpose(X, (0, 3, 4, 1, 2))  # bsize, a, b, r2, r1
            X = T.split(x=X, splits_size=[1] * a, n_splits=a, axis=1)  # a, [bsize, b, r, r]
            X = [T.reshape(x, (bsize, b, r, r)) for x in X]
            X = T.concatenate(X, axis=2)  # bsize, b, a*r, r
            X = T.split(x=X, splits_size=[1] * b, n_splits=b, axis=1)  # b, [bsize, a*r, r]
            X = [T.reshape(x, (bsize, a * r, r)) for x in X]
            X = T.concatenate(X, axis=2)  # bsize, a*r, b*r
            return X.dimshuffle(0, 'x', 1, 2)
        Xc = T.split(x=input, splits_size=[input.shape[1] // self.c] * self.c, n_splits=self.c, axis=1)
        return T.concatenate([_phase_shift(xc, self.r) for xc in Xc], axis=1)

# Multiscale Dilated Convolution Block
# This function (not a layer in and of itself, though you could make it one) returns a set of concatenated conv2d and dilatedconv2d layers.
# Each layer uses the same basic filter W, operating at a different dilation factor (or taken as the mean of W for the 1x1 conv).
# The channel-wise output of each layer is weighted by a set of coefficients, which are initialized to 1 / the total number of dilation scales,
# meaning that we're starting by taking an elementwise mean. These should be learnable parameters.

# NOTES: - I'm considering changing the variable names to be more descriptive, and look less like ridiculous academic code. It's on the to-do list.
#        - I keep the bias and nonlinearity out of the default definition for this layer, as I expect it to be batchnormed and nonlinearized in the model config.
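The block itself is not included in this excerpt. A simplified sketch of the idea described above, assuming a Lasagne version that provides DilatedConv2DLayer (which only supports zero padding, hence the explicit PadLayer); the name mdc_block_sketch and the dilation factors are made up for illustration, and this is not the author's exact code:

from lasagne.layers import (DilatedConv2DLayer, PadLayer, ScaleLayer,
                            ElemwiseSumLayer)
from lasagne.init import GlorotUniform, Constant

def mdc_block_sketch(incoming, num_filters, filter_size=3, dilations=(1, 2, 4)):
    branches = []
    shared_W = None
    for d in dilations:
        # Pad so every dilation scale preserves the input's spatial size.
        padded = PadLayer(incoming, width=d * (filter_size // 2))
        conv = DilatedConv2DLayer(padded, num_filters, filter_size,
                                  dilation=(d, d),
                                  W=GlorotUniform() if shared_W is None else shared_W,
                                  b=None, nonlinearity=None)  # no bias/nonlinearity
        shared_W = conv.W  # share one filter bank W across all scales
        branches.append(conv)
    # Per-channel coefficients initialized to 1/num_scales, so the block starts
    # out as an elementwise mean of the scales; they remain learnable.
    scaled = [ScaleLayer(br, scales=Constant(1.0 / len(dilations)))
              for br in branches]
    return ElemwiseSumLayer(scaled)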
Project: cav_gcnn | Author: myinxd
def cnn_build(self, max_epochs=20, batch_size=100,
                  learning_rate=0.001, momentum=0.9,
                  verbose=1):
        """Build the network"""
        if batch_size is None:
            self.net = NeuralNet(
                layers=self.layers,
                max_epochs=max_epochs,
                update=lasagne.updates.nesterov_momentum,
                update_learning_rate=learning_rate,
                update_momentum=momentum,
                regression=False,
                verbose=verbose)
        else:
            # batch iterator
            batch_iterator = self.gen_BatchIterator(batch_size=batch_size)
            self.net = NeuralNet(
                layers=self.layers,
                batch_iterator_train=batch_iterator,
                max_epochs=max_epochs,
                update=lasagne.updates.nesterov_momentum,
                update_learning_rate=learning_rate,
                update_momentum=momentum,
                regression=False,
                verbose=verbose)
Project: neural-doodle | Author: alexjc
def do_match_patches(self, layer):
        # Use node in the model to compute the result of the normalized cross-correlation, using results from the
        # nearest-neighbor layers called 'nn3_1' and 'nn4_1'.
        dist = self.matcher_outputs[layer]
        dist = dist.reshape((dist.shape[1], -1))
        # Compute the score of each patch, taking into account statistics from previous iteration. This equalizes
        # the chances of the patches being selected when the user requests more variety.
        offset = self.matcher_history[layer].reshape((-1, 1))
        scores = (dist - offset * args.variety)
        # Pick the best style patches for each patch in the current image, the result is an array of indices.
        # Also return the maximum value along both axis, used to compare slices and add patch variety.
        return [scores.argmax(axis=0), scores.max(axis=0), dist.max(axis=1)]


    #------------------------------------------------------------------------------------------------------------------
    # Error/Loss Functions
    #------------------------------------------------------------------------------------------------------------------
Project: neural-doodle | Author: alexjc
def style_loss(self):
        """Returns a list of loss components as Theano expressions. Finds the best style patch for each patch in the
        current image using normalized cross-correlation, then computes the mean squared error for all patches.
        """
        style_loss = []
        if args.style_weight == 0.0:
            return style_loss

        # Extract the patches from the current image, as well as their magnitude.
        result = self.do_extract_patches(zip(self.style_layers, self.model.get_outputs('conv', self.style_layers)))

        # Multiple style layers are optimized separately, usually conv3_1 and conv4_1 — semantic data not used here.
        for l, matches, patches in zip(self.style_layers, self.tensor_matches, result[0::3]):
            # Compute the mean squared error between the current patch and the best matching style patch.
            # Ignore the last channels (from semantic map) so errors returned are indicative of image only.
            loss = T.mean((patches - matches[:,:self.model.channels[l]]) ** 2.0)
            style_loss.append(('style', l, args.style_weight * loss))
        return style_loss
Project: melanoma-transfer | Author: learningtitans
def get_objective(l1=0, l2=0.005):
    def objective(layers, loss_function, target, aggregate=aggregate,
                  deterministic=False, get_output_kw=None):
        if get_output_kw is None:
            get_output_kw = {}
        output_layer = layers[-1]
        first_layer = layers[1]
        network_output = lasagne.layers.get_output(
            output_layer, deterministic=deterministic, **get_output_kw)
        if not deterministic:
            losses = loss_function(network_output, target) \
                    + l2 * regularization.regularize_network_params(
                        output_layer, regularization.l2) \
                    + l1 * regularization.regularize_layer_params(
                        output_layer, regularization.l1)
        else:
            losses = loss_function(network_output, target)
        return aggregate(losses)
    return objective
Project: NeuroNLP | Author: XuezheMax
def exe_rnn(use_embedd, length, num_units, position, binominal):
    batch_size = BATCH_SIZE

    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    target_var = T.ivector(name='targets')

    layer_input = lasagne.layers.InputLayer(shape=(None, length, 1), input_var=input_var, name='input')
    if use_embedd:
        layer_position = construct_position_input(batch_size, length, num_units)
        layer_input = lasagne.layers.concat([layer_input, layer_position], axis=2)

    layer_rnn = RecurrentLayer(layer_input, num_units, nonlinearity=nonlinearities.tanh, only_return_final=True,
                               W_in_to_hid=lasagne.init.GlorotUniform(), W_hid_to_hid=lasagne.init.GlorotUniform(),
                               b=lasagne.init.Constant(0.), name='RNN')
    # W = layer_rnn.W_hid_to_hid.sum()
    # U = layer_rnn.W_in_to_hid.sum()
    # b = layer_rnn.b.sum()

    layer_output = DenseLayer(layer_rnn, num_units=1, nonlinearity=nonlinearities.sigmoid, name='output')

    return train(layer_output, layer_rnn, input_var, target_var, batch_size, length, position, binominal)
Project: NeuroNLP | Author: XuezheMax
def create_updates(loss, network, opt, learning_rate, momentum, beta1, beta2):
    params = lasagne.layers.get_all_params(network, trainable=True)
    grads = theano.grad(loss, params)
    # if max_norm:
    #     names = ['crf.U', 'crf.W_h', 'crf.W_c', 'crf.b']
    #     constraints = [grad for param, grad in zip(params, grads) if param.name in names]
    #     assert len(constraints) == 4
    #     scaled_grads = total_norm_constraint(constraints, max_norm=max_norm)
    #     counter = 0
    #     for i in xrange(len(params)):
    #         param = params[i]
    #         if param.name in names:
    #             grads[i] = scaled_grads[counter]
    #             counter += 1
    #     assert counter == 4
    if opt == 'adam':
        updates = adam(grads, params=params, learning_rate=learning_rate, beta1=beta1, beta2=beta2)
    elif opt == 'momentum':
        updates = nesterov_momentum(grads, params=params, learning_rate=learning_rate, momentum=momentum)
    else:
        raise ValueError('unknown optimization algorithm: %s' % opt)

    return updates
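A hedged usage sketch for create_updates, assuming network, input_var and target_var were built elsewhere (hypothetical names): the returned updates plug directly into theano.function.

loss = lasagne.objectives.categorical_crossentropy(
    lasagne.layers.get_output(network), target_var).mean()
updates = create_updates(loss, network, opt='adam', learning_rate=1e-3,
                         momentum=0.9, beta1=0.9, beta2=0.999)
train_fn = theano.function([input_var, target_var], loss, updates=updates)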
Project: supic | Author: Hirico
def __init__(self):
        self.network = collections.OrderedDict()
        self.network['img'] = InputLayer((None, 3, None, None))
        self.network['seed'] = InputLayer((None, 3, None, None))

        config, params = self.load_model()
        self.setup_generator(self.last_layer(), config)

        if args.train:
            concatenated = lasagne.layers.ConcatLayer([self.network['img'], self.network['out']], axis=0)
            self.setup_perceptual(concatenated)
            self.load_perceptual()
            self.setup_discriminator()
        self.load_generator(params)
        self.compile()

    #------------------------------------------------------------------------------------------------------------------
    # Network Configuration
    #------------------------------------------------------------------------------------------------------------------
Project: Theano-MPI | Author: uoguelph-mlrg
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print ("critic output:", layer.output_shape)
    return layer
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        rng=np.random.RandomState(1234)
        lasagne.random.set_rng(rng)

        # Prepare Theano variables for inputs and targets
        self.noise_var = T.matrix('noise')
        self.input_var = T.tensor4('inputs')

        # Create neural network model
        generator = build_generator(self.noise_var)
        critic = build_critic(self.input_var)

        # Create expression for passing real data through the critic
        self.real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        self.fake_out = lasagne.layers.get_output(critic,
                lasagne.layers.get_output(generator))

        # Create update expressions for training
        self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
        self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        self.generator = generator
        self.critic = critic
Project: Theano-MPI | Author: uoguelph-mlrg
def build_critic(input_var=None, verbose=False):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 32, 32)
    layer = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    # three convolutions
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 512, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    if verbose: print ("critic output:", layer.output_shape)
    return layer
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        rng=np.random.RandomState(1234)
        lasagne.random.set_rng(rng)

        # Prepare Theano variables for inputs and targets
        self.noise_var = T.matrix('noise')
        self.input_var = T.tensor4('inputs')

        # Create neural network model
        generator = build_generator(self.noise_var,self.verbose)
        critic = build_critic(self.input_var,self.verbose)

        # Create expression for passing real data through the critic
        self.real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        self.fake_out = lasagne.layers.get_output(critic,
                lasagne.layers.get_output(generator))

        # Create update expressions for training
        self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
        self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        self.generator = generator
        self.critic = critic
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet50(input_shape=(None, 3, 224, 224))

        if self.verbose: print('Total number of layers:', len(lasagne.layers.get_all_layers(net['prob'])))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def compile_val(self):

        if self.verbose: print('compiling validation function...')

        import theano

        from lasagne.layers import get_output

        output_val = lasagne.layers.get_output(self.output_layer, self.x, deterministic=True)

        from lasagne.objectives import categorical_accuracy, categorical_crossentropy

        cost = categorical_crossentropy(output_val, self.y).mean()
        error = 1-categorical_accuracy(output_val, self.y, top_k=1).mean()
        error_top_5 = 1-categorical_accuracy(output_val, self.y, top_k=5).mean()

        self.val_fn = theano.function([self.subb_ind], [cost, error, error_top_5],
                                      updates=[],
                                      givens=[(self.x, self.shared_x_slice),
                                              (self.y, self.shared_y_slice)])
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_vgg16(input_shape=(None, 3, 224, 224), verbose=self.verbose)
        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def compile_val(self):

        if self.verbose: print('compiling validation function...')

        import theano

        from lasagne.layers import get_output

        output_val = lasagne.layers.get_output(self.output_layer, self.x, deterministic=True)

        from lasagne.objectives import categorical_accuracy, categorical_crossentropy

        cost = categorical_crossentropy(output_val, self.y).mean()
        error = 1-categorical_accuracy(output_val, self.y, top_k=1).mean()
        error_top_5 = 1-categorical_accuracy(output_val, self.y, top_k=5).mean()

        self.val_fn = theano.function([self.subb_ind], [cost, error, error_top_5],
                                      updates=[],
                                      givens=[(self.x, self.shared_x_slice),
                                              (self.y, self.shared_y_slice)])
Project: Theano-MPI | Author: uoguelph-mlrg
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print ("critic output:", layer.output_shape)
    return layer
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        rng=np.random.RandomState(1234)
        lasagne.random.set_rng(rng)

        # Prepare Theano variables for inputs and targets
        self.noise_var = T.matrix('noise')
        self.input_var = T.tensor4('inputs')

        # Create neural network model
        generator = build_generator(self.noise_var)
        critic = build_critic(self.input_var)

        # Create expression for passing real data through the critic
        self.real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        self.fake_out = lasagne.layers.get_output(critic,
                lasagne.layers.get_output(generator))


        # Create update expressions for training
        self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
        self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        self.generator = generator
        self.critic = critic
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet152(input_shape=(None, 3, 224, 224))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1-categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1-categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def compile_val(self):

        if self.verbose: print('compiling validation function...')

        import theano

        from lasagne.layers import get_output

        output_val = lasagne.layers.get_output(self.output_layer, self.x, deterministic=True)

        from lasagne.objectives import categorical_accuracy, categorical_crossentropy

        cost = categorical_crossentropy(output_val, self.y).mean()
        error = 1-categorical_accuracy(output_val, self.y, top_k=1).mean()
        error_top_5 = 1-categorical_accuracy(output_val, self.y, top_k=5).mean()

        self.val_fn = theano.function([self.subb_ind], [cost, error, error_top_5],
                                      updates=[],
                                      givens=[(self.x, self.shared_x_slice),
                                              (self.y, self.shared_y_slice)])
Project: LasagneNLP | Author: XuezheMax
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
                    precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match rnn incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiRNN(incoming, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
                       precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
Project: LasagneNLP | Author: XuezheMax
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                     peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn?
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
Project: deep-learning-models | Author: kuleshov
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
                opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save model that will be created
    self.model = model
    self.n_batch = n_batch
    self.n_lat = 100
    self.n_dim = n_dim
    self.n_chan = n_chan

    # invoke parent constructor
    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)

    # sample generation
    Z = T.matrix(dtype=theano.config.floatX) # noise matrix
    _, _, _, _, l_sample, l_p_z = self.network
    sample = lasagne.layers.get_output(l_sample,  {l_p_z : Z}, deterministic=True)
    self.sample = theano.function([Z], sample, on_unused_input='warn')
Project: deep-learning-models | Author: kuleshov
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
                opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save model that will be created
    self.model = model
    self.n_sample = 1 # adjustable parameter, though 1 works best in practice

    self.n_batch = n_batch
    self.n_lat = 200
    self.n_dim = n_dim
    self.n_chan = n_chan

    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)

    # sample generation
    Z = T.matrix(dtype=theano.config.floatX) # noise matrix
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
        l_qa, l_qz, l_d  = self.network
    sample = lasagne.layers.get_output(l_px_mu,  {l_qz : Z}, deterministic=True)
    self.sample = theano.function([Z], sample, on_unused_input='warn')
Project: deep-learning-models | Author: kuleshov
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
                opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save model that will be created
    self.model = model
    self.n_batch = n_batch
    self.n_lat = 100
    self.n_dim = n_dim
    self.n_chan = n_chan

    # invoke parent constructor
    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)

    # sample generation
    Z = T.matrix(dtype=theano.config.floatX) # noise matrix
    _, _, _, _, l_sample, l_p_z = self.network
    sample = lasagne.layers.get_output(l_sample,  {l_p_z : Z}, deterministic=True)
    self.sample = theano.function([Z], sample, on_unused_input='warn')
Project: deep-learning-models | Author: kuleshov
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
                opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save model that will be created
    self.model = model
    self.n_sample = 1 # adjustable parameter, though 1 works best in practice

    self.n_batch = n_batch
    self.n_lat = 200
    self.n_dim = n_dim
    self.n_chan = n_chan

    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)

    # sample generation
    Z = T.matrix(dtype=theano.config.floatX) # noise matrix
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
        l_qa, l_qz  = self.network
    sample = lasagne.layers.get_output(l_px_mu,  {l_qz : Z}, deterministic=True)
    self.sample = theano.function([Z], sample, on_unused_input='warn')
Project: deep-learning-models | Author: kuleshov
def create_objectives(self, deterministic=False):
    # load network
    l_g, l_d = self.network

    # load output
    g      = lasagne.layers.get_output(l_g, deterministic=deterministic)
    d_real = lasagne.layers.get_output(l_d, deterministic=deterministic)
    d_fake = lasagne.layers.get_output(l_d, g, deterministic=deterministic)

    # define loss
    loss_g = lasagne.objectives.binary_crossentropy(d_fake, 1).mean()
    loss_d = ( lasagne.objectives.binary_crossentropy(d_real, 1)
             + lasagne.objectives.binary_crossentropy(d_fake, 0) ).mean()

    # compute and store discriminator probabilities
    p_real = (d_real > 0.5).mean()
    p_fake = (d_fake < 0.5).mean()

    return loss_g, loss_d, p_real, p_fake
Project: deep-learning-models | Author: kuleshov
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
                opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save model that will be created
    self.model = model
    self.n_sample = 1 # adjustable parameter, though 1 works best in practice

    self.n_batch = n_batch
    self.n_lat = 200
    self.n_dim = n_dim
    self.n_chan = n_chan

    Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)

    # sample generation
    Z = T.matrix(dtype=theano.config.floatX) # noise matrix
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
        l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
        l_qa, l_qz  = self.network
    sample = lasagne.layers.get_output(l_px_mu,  {l_qz : Z}, deterministic=True)
    self.sample = theano.function([Z], sample, on_unused_input='warn')
Project: crossingNet | Author: melonwan
def __init__(self, z_dim, batch_size = 100, lr = 0.0005, b1 =0.5):
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.lr = lr
        self.b1 = b1
        self.z_std = 0.6 # used when z is normal distribution
        print('depthGAN is initialized with z_dim=%d' % self.z_dim)

        # build network
        self.noise_input_layer = lasagne.layers.InputLayer((None, self.z_dim))
        self.gen_depth_layer = \
            self.build_generative(self.noise_input_layer)
        self.depth_shape =\
            lasagne.layers.get_output_shape(self.gen_depth_layer)
        print('gen built with generated depth shape={}'.format(self.depth_shape))
        self.build_discriminative()
Project: Q-Optimality-Tightening | Author: ShibiHe
def build_linear_network(self, input_width, input_height, output_dim,
                             num_frames, batch_size):
        """
        Build a simple linear learner.  Useful for creating
        tests that sanity-check the weight update code.
        """

        l_in = lasagne.layers.InputLayer(
            shape=(None, num_frames, input_width, input_height)
        )

        l_out = lasagne.layers.DenseLayer(
            l_in,
            num_units=output_dim,
            nonlinearity=None,
            W=lasagne.init.Constant(0.0),
            b=None
        )

        return l_out
Project: DeepRes | Author: Aneeshers
def __init__(self):
        self.network = collections.OrderedDict()
        self.network['img'] = InputLayer((None, 3, None, None))
        self.network['seed'] = InputLayer((None, 3, None, None))

        config, params = self.load_model()
        self.setup_generator(self.last_layer(), config)

        if args.train:
            concatenated = lasagne.layers.ConcatLayer([self.network['img'], self.network['out']], axis=0)
            self.setup_perceptual(concatenated)
            self.load_perceptual()
            self.setup_discriminator()
        self.load_generator(params)
        self.compile()

    #------------------------------------------------------------------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------
Project: deligan | Author: val-iisc
def __init__(self, incoming, target_shape, filter_size, stride=(2, 2),
                 W=lasagne.init.Normal(0.05), b=lasagne.init.Constant(0.), nonlinearity=relu, **kwargs):
        super(Deconv2DLayer, self).__init__(incoming, **kwargs)
        self.target_shape = target_shape
        self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
        self.filter_size = lasagne.layers.dnn.as_tuple(filter_size, 2)
        self.stride = lasagne.layers.dnn.as_tuple(stride, 2)
        self.target_shape = target_shape

        self.W_shape = (incoming.output_shape[1], target_shape[1], filter_size[0], filter_size[1])
        self.W = self.add_param(W, self.W_shape, name="W")
        if b is not None:
            self.b = self.add_param(b, (target_shape[1],), name="b")
        else:
            self.b = None
Project: deligan | Author: val-iisc
def __init__(self, incoming, target_shape, filter_size, stride=(2, 2),
                 W=lasagne.init.Normal(0.05), b=lasagne.init.Constant(0.), nonlinearity=relu, **kwargs):
        super(Deconv2DLayer, self).__init__(incoming, **kwargs)
        self.target_shape = target_shape
        self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
        self.filter_size = lasagne.layers.dnn.as_tuple(filter_size, 2)
        self.stride = lasagne.layers.dnn.as_tuple(stride, 2)
        self.target_shape = target_shape

        self.W_shape = (incoming.output_shape[1], target_shape[1], filter_size[0], filter_size[1])
        self.W = self.add_param(W, self.W_shape, name="W")
        if b is not None:
            self.b = self.add_param(b, (target_shape[1],), name="b")
        else:
            self.b = None
Project: iterative_inference_segm | Author: adri-romsor
def concatenate(net, in_layer, concat_h, concat_vars, pos, nb_concat_features):
    """
    Auxiliary function that checks whether we should concatenate the output of
    a layer `in_layer` of a network `net` to a tensor in `concat_vars`

    Parameters
    ----------
    net: dictionary containing layers of a network
    in_layer: name of a layer in net
    concat_h: list of layers to concatenate
    concat_vars: list of variables (tensors) to concatenate
    pos: position in lists `concat_h` and `concat_vars` we want to check
    nb_concat_features: number of features in the layer we want to concatenate
    """
    if pos < len(concat_h) and concat_h[pos] == 'input':
        concat_h[pos] = in_layer

    # if this is the layer we want to concatenate, create an InputLayer with the
    # tensor we want to concatenate and a ConcatLayer that does the job afterwards
    if in_layer in concat_h:
        net[in_layer + '_h'] = InputLayer((None, nb_concat_features, None, None), concat_vars[pos])
        net[in_layer + '_concat'] = ConcatLayer((net[in_layer + '_h'],
                                            net[in_layer]), axis=1, cropping=None)
        pos += 1
        out = in_layer + '_concat'

        laySize = net[out].output_shape
        n_cl = laySize[1]
        print('Number of feature maps (concat):', n_cl)
    else:
        out = in_layer

    if concat_h and pos <= len(concat_h) and concat_h[pos-1] == 'noisy_input':
        concat_h[pos-1] = 'input'

    return pos, out
Project: iterative_inference_segm | Author: adri-romsor
def concatenate_end2end(net, in_layer, concat_h, layer_h, pos, nb_concat_features):
    """
    Auxiliary function that checks whether we should concatenate the output of
    a layer `in_layer` of a network `net` to a layer in `layer_h`

    Parameters
    ----------
    net: dictionary containing layers of a network
    in_layer: name of a layer in net
    concat_h: list of layers to concatenate
    layer_h: list of layers to concatenate
    pos: position in lists `concat_h` and `layer_h` we want to check
    nb_concat_features: number of features in the layer we want to concatenate
    """
    if pos < len(concat_h) and concat_h[pos] == 'input':
        concat_h[pos] = in_layer

    # if this is the layer we want to concatenate, create an InputLayer with the
    # tensor we want to concatenate and a ConcatLayer that does the job afterwards
    if in_layer in concat_h:
        net[in_layer + '_h'] = layer_h[pos]
        net[in_layer + '_concat'] = ConcatLayer((net[in_layer + '_h'],
                                            net[in_layer]), axis=1, cropping=None)
        pos += 1
        out = in_layer + '_concat'

        laySize = net[out].output_shape
        n_cl = laySize[1]
        print('Number of feature maps (concat):', n_cl)
    else:
        out = in_layer

    if concat_h and pos <= len(concat_h) and concat_h[pos-1] == 'noisy_input':
        concat_h[pos-1] = 'input'

    return pos, out
Project: Neural-Photo-Editor | Author: ajbrock
def InceptionUpscaleLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                    incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                        incoming = lasagne.layers.Upscale2DLayer(
                            incoming=incoming if j == 0 else branch[i],
                            scale_factor = dict['stride'][j]),
                        pool_size = dict['filter_size'][j],
                        stride = [1,1],
                        mode = dict['mode'][j],
                        pad = dict['pad'][j],
                        name = block_name+'_'+str(i)+'_'+str(j)),
                    nonlinearity = dict['nonlinearity'][j])
                # Apply Batchnorm    
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers        

    return CL(incomings=branch,name=block_name)

# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
Project: Neural-Photo-Editor | Author: ajbrock
def pd(num_layers=2,num_filters=32,filter_size=(3,3),pad=1,stride = (1,1),nonlinearity=elu,style='convolutional',bnorm=1,**kwargs):
    input_args = locals()    
    input_args.pop('num_layers')
    return {key: entry if type(entry) is list else [entry] * num_layers for key, entry in input_args.items()}
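For illustration (values arbitrary), a call like the following broadcasts every scalar argument into a list of length num_layers, while arguments that are already lists pass through unchanged:

params = pd(num_layers=2, num_filters=[32, 64], filter_size=(3, 3))
# -> {'num_filters': [32, 64], 'filter_size': [(3, 3), (3, 3)],
#     'pad': [1, 1], 'stride': [(1, 1), (1, 1)], 'bnorm': [1, 1], ...}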

# Possible Conv2DDNN convenience function. Remember to delete the C2D import at the top if you use this    
# def C2D(incoming = None, num_filters = 32, filter_size= [3,3],pad = 'same',stride = [1,1], W = initmethod('relu'),nonlinearity = elu,name = None):
    # return lasagne.layers.dnn.Conv2DDNNLayer(incoming,num_filters,filter_size,stride,pad,False,W,None,nonlinearity,False)

# Shape-Preserving Gaussian Sample layer for latent vectors with spatial dimensions.
# This is a holdover from an "old" (i.e. I abandoned it last month) idea.
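That layer is not shown in this excerpt. A minimal sketch of what a shape-preserving Gaussian sample layer could look like (class name and seed are hypothetical, not the author's code): given mean and log-sigma inputs of identical shape, it samples mu + exp(logsigma) * eps with eps drawn elementwise from a standard normal of the full spatial shape.

import lasagne
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

class GaussianSampleLayer(lasagne.layers.MergeLayer):
    def __init__(self, mu, logsigma, seed=1234, **kwargs):
        super(GaussianSampleLayer, self).__init__([mu, logsigma], **kwargs)
        self._rng = RandomStreams(seed)

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]  # same shape as the mean input

    def get_output_for(self, inputs, deterministic=False, **kwargs):
        mu, logsigma = inputs
        if deterministic:
            return mu  # fall back to the mean when sampling is switched off
        return mu + T.exp(logsigma) * self._rng.normal(mu.shape)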
Project: Neural-Photo-Editor | Author: ajbrock
def get_params(self, unwrap_shared=True, **tags):
        params = []
        for l in self.layers:
            for p in l.get_params(**tags):
                params.append(p)
        return params
        # params = [p for p in l.get_params(trainable=True) for l in self.layers]
        # return params
        # return [p for p in lay.get_params(unwrap_shared,**tags) for lay in self.layers]
        # return lasagne.layers.get_all_params(self.final_layer,trainable=True)
Project: neural-doodle | Author: alexjc
def load_data(self):
        """Open the serialized parameters from a pre-trained network, and load them into the model created.
        """
        vgg19_file = os.path.join(os.path.dirname(__file__), 'vgg19_conv.pkl.bz2')
        if not os.path.exists(vgg19_file):
            error("Model file with pre-trained convolution layers not found. Download here...",
                  "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2")

        data = pickle.load(bz2.open(vgg19_file, 'rb'))
        params = lasagne.layers.get_all_param_values(self.network['main'])
        lasagne.layers.set_all_param_values(self.network['main'], data[:len(params)])
Project: neural-doodle | Author: alexjc
def setup(self, layers):
        """Setup the inputs and outputs, knowing the layers that are required by the optimization algorithm.
        """
        self.tensor_img = T.tensor4()
        self.tensor_map = T.tensor4()
        tensor_inputs = {self.network['img']: self.tensor_img, self.network['map']: self.tensor_map}
        outputs = lasagne.layers.get_output([self.network[l] for l in layers], tensor_inputs)
        self.tensor_outputs = {k: v for k, v in zip(layers, outputs)}
Project: neural-doodle | Author: alexjc
def get_outputs(self, type, layers):
        """Fetch the output tensors for the network layers.
        """
        return [self.tensor_outputs[type+l] for l in layers]
Project: neural-doodle | Author: alexjc
def do_extract_patches(self, layers, size=3, stride=1):
        """This function builds a Theano expression that will get compiled an run on the GPU. It extracts 3x3 patches
        from the intermediate outputs in the model.
        """
        results = []
        for l, f in layers:
            # Use a Theano helper function to extract "neighbors" of specific size, seems a bit slower than doing
            # it manually but much simpler!
            patches = theano.tensor.nnet.neighbours.images2neibs(f, (size, size), (stride, stride), mode='valid')
            # Make sure the patches are in the shape required to insert them into the model as another layer.
            patches = patches.reshape((-1, patches.shape[0] // f.shape[1], size, size)).dimshuffle((1, 0, 2, 3))
            # Calculate the magnitude that we'll use for normalization at runtime, then store...
            results.extend([patches] + self.compute_norms(T, l, patches))
        return results
Project: geomdn | Author: afshinrahimi
def set_params(self, params):
        lasagne.layers.set_all_param_values(self.l_out, params)
Project: melanoma-transfer | Author: learningtitans
def create_net(config, **kwargs):
    args = {
        'layers': config.layers,
        'batch_iterator_train': iterator.ResampleIterator(
            config, batch_size=config.get('batch_size_train')),
        'batch_iterator_test': iterator.SharedIterator(
            config, deterministic=True,
            batch_size=config.get('batch_size_test')),
        'on_epoch_finished': [
            Schedule('update_learning_rate', config.get('schedule'),
                     weights_file=config.final_weights_file),
            SaveBestWeights(weights_file=config.weights_file,
                            loss='kappa', greater_is_better=True,),
            SaveWeights(config.weights_epoch, every_n_epochs=5),
            SaveWeights(config.weights_best, every_n_epochs=1, only_best=True),
        ],
        'objective': get_objective(),
        'use_label_encoder': False,
        'eval_size': 0.1,
        'regression': False,
        'max_epochs': 1000,
        'verbose': 1,
        'update_learning_rate': theano.shared(
            util.float32(config.get('schedule')[0])),
        'update': nesterov_momentum,
        'update_momentum': 0.1,
        'custom_scores': [('kappa', util.kappa)],
    }
    args.update(kwargs)
    net = Net(**args)
    return net
Project: snn4hrl | Author: florensacc
def __init__(self, incomings, name=None):  # incomings is a list (tuple) of 2 layers. The second is the "selector"
        super(BilinearIntegrationLayer, self).__init__(incomings, name)
Project: iGAN | Author: junyanz
def load_model(net, layer='fc8'):
    model_values = utils.PickleLoad(os.path.join(model_dir, 'caffe_reference_%s.pkl' % layer))
    lasagne.layers.set_all_param_values(net[layer], model_values)