Python lasagne.init module: Constant() example source code

We have collected the following 50 code examples from open-source Python projects to illustrate how to use lasagne.init.Constant().
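
A minimal usage sketch first (names and layer sizes here are illustrative, not taken from any of the projects below): Constant is typically passed as the bias initializer of a layer, and any Lasagne initializer can also be sampled directly into a numpy array.

from lasagne.layers import InputLayer, DenseLayer
from lasagne.init import Constant, GlorotUniform

l_in = InputLayer((None, 100))
l_hid = DenseLayer(l_in, num_units=200,
                   W=GlorotUniform(),  # weights drawn from a Glorot uniform distribution
                   b=Constant(0.))     # every bias starts at 0.0

# Initializers can also be sampled directly:
b_values = Constant(0.05).sample((200,))  # numpy array of shape (200,), all entries 0.05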

Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=conv.conv1d_mc0, **kwargs):
        if isinstance(incoming, tuple):
            input_shape = incoming
        else:
            input_shape = incoming.output_shape

        # Retrieve the supplied name, if it exists; otherwise use ''
        if 'name' in kwargs:
            basename = kwargs['name'] + '.'
            # Create a separate version of kwargs for the contained layers
            # which does not include 'name'
            layer_kwargs = dict((key, arg) for key, arg in kwargs.items() if key != 'name')
        else:
            basename = ''
            layer_kwargs = kwargs
        self.conv1d = Conv1DLayer(InputLayer((None,) + input_shape[2:]), num_filters, filter_size, stride, pad,
                                  untie_biases, W, b, nonlinearity, flip_filters, convolution, name=basename + "conv1d",
                                  **layer_kwargs)
        self.W = self.conv1d.W
        self.b = self.conv1d.b
        super(ConvTimeStep1DLayer, self).__init__(incoming, **kwargs)
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(ChainCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
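
A hedged usage sketch for the layer above (shapes are illustrative): the CRF layer takes the feature sequence as its main input and, optionally, a binary mask marking the real (non-padded) time steps.

from lasagne.layers import InputLayer

l_feat = InputLayer((16, 40, 100))  # 16 sequences of length 40 with 100 features each
l_mask = InputLayer((16, 40))       # 1 for real tokens, 0 for padding
l_crf = ChainCRFLayer(l_feat, num_labels=10, mask_input=l_mask)
# l_crf.W has shape (100, 11, 11): num_labels + 1 rows/columns to account for the pad label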
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')

        self.W_c = self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_labels, mask_input=None, U=init.GlorotUniform(), W_h=init.GlorotUniform(),
                 W_c=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeBiAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.U = self.add_param(U, (dim_inputs, dim_inputs, self.num_labels), name='U')
        self.W_h = None if W_h is None else self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')
        self.W_c = None if W_c is None else self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
Project: MachineComprehension    Author: sa-j    | Project source | File source
def __init__(self, incoming, num_units, max_steps, peepholes=False, mask_input=None, **kwargs):
        """
        Initialization.
        :param incoming: bidirectional mLSTM over the passage
        :param num_units:
        :param max_steps: max number of steps for generating answer words; can be a scalar tensor variable
        :param peepholes:
        :param mask_input: the passage's length mask
        :param kwargs:
        """
        super(AnsPointerLayer, self).__init__(incoming, num_units, peepholes=peepholes,
                                              precompute_input=False, mask_input=mask_input,
                                              only_return_final=False, **kwargs)
        self.max_steps = max_steps
        # initializes attention weights
        input_shape = self.input_shapes[0]
        num_inputs = np.prod(input_shape[2:])
        self.V_pointer = self.add_param(init.Normal(0.1), (num_inputs, num_units), 'V_pointer')
        # doesn't need transpose
        self.v_pointer = self.add_param(init.Normal(0.1), (num_units, 1), 'v_pointer')
        self.W_a_pointer = self.add_param(init.Normal(0.1), (num_units, num_units), 'W_a_pointer')
        self.b_a_pointer = self.add_param(init.Constant(0.), (1, num_units), 'b_a_pointer')
        self.c_pointer = self.add_param(init.Constant(0.), (1, 1), 'c_pointer')
Project: MachineComprehension    Author: sa-j    | Project source | File source
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(CustomDense, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = self.input_shape[-1]

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)
Project: MachineComprehension    Author: sa-j    | Project source | File source
def __init__(self, incoming, num_units, max_steps, peepholes=False, mask_input=None, **kwargs):
        """
        Initialization.
        :param incoming: bidirectional mLSTM over the passage
        :param num_units:
        :param max_steps: max number of steps for generating answer words; can be a scalar tensor variable
        :param peepholes:
        :param mask_input: the passage's length mask
        :param kwargs:
        """
        super(AnsPointerLayer, self).__init__(incoming, num_units, peepholes=peepholes,
                                              precompute_input=False, mask_input=mask_input,
                                              only_return_final=False, **kwargs)
        self.max_steps = max_steps
        # initializes attention weights
        input_shape = self.input_shapes[0]
        num_inputs = np.prod(input_shape[2:])
        self.V_pointer = self.add_param(init.Normal(0.1), (num_inputs, num_units), 'V_pointer')
        # doesn't need transpose
        self.v_pointer = self.add_param(init.Normal(0.1), (num_units, 1), 'v_pointer')
        self.W_a_pointer = self.add_param(init.Normal(0.1), (num_units, num_units), 'W_a_pointer')
        self.b_a_pointer = self.add_param(init.Constant(0.), (num_units, ), 'b_a_pointer')
        c_pointer = theano.shared(np.array([0.], dtype='float32'), name='c_pointer', broadcastable=(True, ))
        self.c_pointer = self.add_param(c_pointer, (1,), 'c_pointer')
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
                 b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(DepParserLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        num_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')

        self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        # This layer inherits from a MergeLayer, because it can have two
        # inputs - the layer input, and the mask.
        # We will just provide the layer input as incomings, unless a mask input was provided.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(CRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
Project: MEM_DGM    Author: thu-ml    | Project source | File source
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: gogh-figure    Author: joelmoniz    | Project source | File source
def __init__(self, incoming, num_styles=None, epsilon=1e-4,
                 beta=Constant(0), gamma=Constant(1), **kwargs):
        super(InstanceNormLayer, self).__init__(incoming, **kwargs)

        self.axes = (2, 3)
        self.epsilon = epsilon

        if num_styles is None:
            shape = (self.input_shape[1],)
        else:
            shape = (num_styles, self.input_shape[1])

        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=True)
Project: time_lstm    Author: DarryO    | Project source | File source
def __init__(self, W_in=init.Normal(0.1), W_hid=init.Normal(0.1),
                 W_cell=init.Normal(0.1), W_to=init.Normal(0.1),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.sigmoid):
        self.W_in = W_in
        self.W_hid = W_hid
        self.W_to = W_to
        # Don't store a cell weight vector when cell is None
        if W_cell is not None:
            self.W_cell = W_cell
        self.b = b
        # For the nonlinearity, if None is supplied, use identity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
Project: crayimage    Author: yandexdataschool    | Project source | File source
def __init__(self, incomings, num_units, nonlinearity=nonlinearities.sigmoid,
               W=init.Uniform(), b=init.Constant(0.0), **kwargs):
    super(MergeDense, self).__init__(incomings=incomings, **kwargs)

    self.num_units = num_units

    self.input_shapes = [ inc.output_shape for inc in incomings ]

    self.weights = [
      self.get_weights(W, shape=input_shape, name='W%d' % i)
      for i, input_shape in enumerate(self.input_shapes)
    ]

    self.b = self.add_param(b, (self.num_units,), name="b", regularizable=False)

    self.nonlinearity = nonlinearity
Project: MEM_DGM    Author: zhenxuan00    | Project source | File source
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: 2WayNet    Author: aviveise    | Project source | File source
def __init__(self, incoming, num_units,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, name=None, **kwargs):
        """
        An extension of a regular dense layer that enables weight sharing between two tied hidden layers. To tie
        two layers, the first should be initialized with an initialization function for the weights; the other
        should receive the first layer's weight matrix as input.
        :param incoming: the input layer of this layer
        :param num_units: output size
        :param W: weight initialization; can be an initialization function or a given matrix
        :param b: bias initialization
        :param nonlinearity: nonlinearity function
        :param name: string
        :param kwargs:
        """
        super(TiedDenseLayer, self).__init__(incoming, num_units, W, b, nonlinearity, name=name)

        if not isinstance(W, lasagne.init.Initializer):
            self.params[self.W].remove('trainable')
            self.params[self.W].remove('regularizable')

        if self.b is not None and not isinstance(b, lasagne.init.Initializer):
            self.params[self.b].remove('trainable')
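
A hedged sketch of the tying pattern the docstring describes (dimensions are chosen so the shared matrix fits both layers; not taken from the project): the first layer receives an initializer, the second receives the first layer's weight variable, which the constructor above then marks as non-trainable.

import lasagne.init as init
from lasagne.layers import InputLayer

l_in = InputLayer((None, 128))
l_first = TiedDenseLayer(l_in, num_units=128, W=init.GlorotUniform(),
                         b=init.Constant(0.))
l_second = TiedDenseLayer(l_first, num_units=128, W=l_first.W,
                          b=init.Constant(0.))
# l_second.W is the same shared variable as l_first.W, and only the first
# layer's copy is tagged as trainable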
Project: melanoma-transfer    Author: learningtitans    | Project source | File source
def conv_params(num_filters, filter_size=(3, 3), stride=(1, 1), border_mode='same',
         nonlinearity=rectify, W=init.Orthogonal(gain=1.0),
         b=init.Constant(0.05), untie_biases=False, **kwargs):
    args = {
        'num_filters': num_filters,
        'filter_size': filter_size,
        'stride': stride,
        'pad': border_mode,         # The new version has 'pad' instead of 'border_mode'
        'nonlinearity': nonlinearity,
        'W': W,
        'b': b,
        'untie_biases': untie_biases,
    }
    args.update(kwargs)
    return args
Project: melanoma-transfer    Author: learningtitans    | Project source | File source
def dense_params(num_units, nonlinearity=rectify, **kwargs):
    args = {
        'num_units': num_units,
        'nonlinearity': nonlinearity,
        'W': init.Orthogonal(1.0),
        'b': init.Constant(0.05),
    }
    args.update(kwargs)
    return args
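
A hedged usage sketch (the network shape is illustrative): both helpers return keyword dictionaries meant to be unpacked into Lasagne layer constructors, with any extra arguments overriding the defaults.

from lasagne.layers import InputLayer, Conv2DLayer, DenseLayer

net = InputLayer((None, 3, 64, 64))
net = Conv2DLayer(net, **conv_params(32))                 # 32 3x3 filters, pad='same'
net = Conv2DLayer(net, **conv_params(64, stride=(2, 2)))  # arguments override the defaults
net = DenseLayer(net, **dense_params(256))                # Orthogonal W, b=Constant(0.05)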
Project: third_person_im    Author: bstadie    | Project source | File source
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: MachineComprehension    Author: sa-j    | Project source | File source
def __init__(self, incoming, num_units, ingate=Gate(), forgetgate=Gate(),
                 cell=Gate(W_cell=None, nonlinearity=nonlinearities.tanh), outgate=Gate(),
                 nonlinearity=nonlinearities.tanh, cell_init=init.Constant(0.), hid_init=init.Constant(0.),
                 backwards=False, learn_init=False, peepholes=True, gradient_steps=-1, grad_clipping=0,
                 unroll_scan=False, precompute_input=True, mask_input=None, **kwargs):
        super(CustomLSTMEncoder, self).__init__(incoming, num_units, ingate, forgetgate, cell, outgate, nonlinearity,
                                                cell_init, hid_init, backwards, learn_init, peepholes, gradient_steps,
                                                grad_clipping, unroll_scan, precompute_input, mask_input, False,
                                                **kwargs)
Project: began    Author: davidtellez    | Project source | File source
def get_b(network, layer_name):
    if (network is not None) and (layer_name in network):
        b = network[layer_name].b
    else:
        b = Constant(0.)  # default value in Lasagne
    return b
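
A hypothetical usage sketch (`pretrained` and the layer names are assumptions): biases are copied from a previously built network dict, with Constant(0.) as the fallback when a layer is absent.

from lasagne.layers import InputLayer, Conv2DLayer

l_in = InputLayer((None, 3, 32, 32))
pretrained = {'conv1': Conv2DLayer(l_in, num_filters=32, filter_size=3, name='conv1')}
b_conv1 = get_b(pretrained, 'conv1')  # reuses the existing bias variable
b_new = get_b(pretrained, 'conv9')    # falls back to Constant(0.)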
Project: rllabplusplus    Author: shaneshixiang    | Project source | File source
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: experiments    Author: tencia    | Project source | File source
def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
                 mode='low_mem', beta=init.Constant(0), gamma=init.Constant(1),
                 mean=init.Constant(0), inv_std=init.Constant(1), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)

        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes

        self.epsilon = epsilon
        self.alpha = alpha
        self.mode = mode

        # create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=True)
        self.mean = self.add_param(mean, shape, 'mean',
                                   trainable=False, regularizable=False)
        self.inv_std = self.add_param(inv_std, shape, 'inv_std',
                                      trainable=False, regularizable=False)
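
A hedged usage sketch: the defaults above mirror Lasagne's BatchNormLayer, with beta, gamma, mean and inv_std all initialized by Constant; passing None disables beta or gamma.

from lasagne import init
from lasagne.layers import InputLayer

l_in = InputLayer((None, 64, 28, 28))
l_bn = BatchNormLayer(l_in, beta=init.Constant(0), gamma=init.Constant(1))
l_bn_unscaled = BatchNormLayer(l_in, gamma=None)  # no learned scaling parameter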
Project: MEM_DGM    Author: thu-ml    | Project source | File source
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: MEM_DGM    Author: thu-ml    | Project source | File source
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(NormalizedAttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: MEM_DGM    Author: thu-ml    | Project source | File source
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(AttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: MEM_DGM    Author: thu-ml    | Project source | File source
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: DynamicMemoryNetworks    Author: swstarlab    | Project source | File source
def __init__(self, W_in=Normal(0.1), W_hid=Normal(0.1),
                 b=Constant(0.), nonlinearity=nonlin.sigmoid):
        self.W_in  = W_in
        self.W_hid = W_hid
        self.b     = b
        if nonlinearity is None:
            self.nonlinearity = nonlin.identity
        else:
            self.nonlinearity = nonlinearity
Project: drmad    Author: bigaidream-projects    | Project source | File source
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 num_leading_axes=1, **kwargs):
        super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        if num_leading_axes >= len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "leaving no trailing axes for the dot product." %
                    (num_leading_axes, len(self.input_shape)))
        elif num_leading_axes < -len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "requesting more trailing axes than there are input "
                    "dimensions." % (num_leading_axes, len(self.input_shape)))
        self.num_leading_axes = num_leading_axes

        if any(s is None for s in self.input_shape[num_leading_axes:]):
            raise ValueError(
                    "A DenseLayer requires a fixed input shape (except for "
                    "the leading axes). Got %r for num_leading_axes=%d." %
                    (self.input_shape, self.num_leading_axes))
        num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)

        if args.regL1 is True:
            self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                     (num_inputs, num_units), name="L1")
        if args.regL2 is True:
            self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                     (num_inputs, num_units), name="L2")
Project: CIKM2017    Author: MovieFIB    | Project source | File source
def __init__(
        self, incoming, num_units,
        W=init.Constant(0.1),
        b=init.Constant(0.),
        nonlinearity=nonlinearities.rectify,
        **kwargs
    ):
        super(Tensor4LinearLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.num_units = num_units
        self.W = self.add_param(
            W, (num_inputs, num_units),
            name="W"
        )
        if b is not None:
            self.b = self.add_param(
                b,
                (
                    self.input_shape[1],
                    self.input_shape[2], self.num_units
                )
            )
        else:
            self.b = None
        if nonlinearity is not None:
            self.nonlinearity = nonlinearity
        else:
            self.nonlinearity = nonlinearities.identity
Project: CIKM2017    Author: MovieFIB    | Project source | File source
def __init__(
        self, incoming, num_units,
        W=init.Constant(0.1),
        b=init.Constant(0.),
        nonlinearity=nonlinearities.rectify,
        **kwargs
    ):
        super(Tensor3LinearLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.num_units = num_units
        self.W = self.add_param(
            W, (num_inputs, num_units),
            name="W"
        )
        if b is not None:
            self.b = self.add_param(
                b,
                (
                    self.input_shape[1], self.num_units
                )
            )
        else:
            self.b = None
        if nonlinearity is not None:
            self.nonlinearity = nonlinearity
        else:
            self.nonlinearity = nonlinearities.identity
Project: opt-mmd    Author: dougalsutherland    | Project source | File source
def __init__(self, incoming, num_centers,
                 locs=init.Normal(std=1), log_sigma=init.Constant(0.),
                 **kwargs):
        super(RBFLayer, self).__init__(incoming, **kwargs)
        self.num_centers = num_centers

        assert len(self.input_shape) == 2
        in_dim = self.input_shape[1]
        self.locs = self.add_param(locs, (num_centers, in_dim), name='locs',
                                   regularizable=False)
        self.log_sigma = self.add_param(log_sigma, (), name='log_sigma')
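
A hedged usage sketch (input size is illustrative): note that log_sigma is a scalar parameter, created by passing the empty shape () with Constant(0.) as its initializer.

from lasagne.layers import InputLayer

l_in = InputLayer((None, 10))
l_rbf = RBFLayer(l_in, num_centers=50)  # locs: shape (50, 10); log_sigma: scalar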
项目:opt-mmd    作者:dougalsutherland    | 项目源码 | 文件源码
def __init__(self, incoming, num_freqs,
                 freqs=init.Normal(std=1), log_sigma=init.Constant(0.),
                 **kwargs):
        super(SmoothedCFLayer, self).__init__(incoming, **kwargs)
        self.num_freqs = num_freqs

        assert len(self.input_shape) == 2
        in_dim = self.input_shape[1]
        self.freqs = self.add_param(freqs, (num_freqs, in_dim), name='freqs')
        self.log_sigma = self.add_param(log_sigma, (), name='log_sigma')
Project: LSTM-and-maxlayer-for-SNV-based-phenotype-prediction    Author: widmi    | Project source | File source
def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
        super(InputDropoutLayer, self).__init__(incoming, **kwargs)
        self.p = p
        self.rescale = rescale
        constant_dim = 1
        self.constant_dim = constant_dim
        self.dropoutshape = (self.input_shape[:constant_dim]) + (self.input_shape[constant_dim+1:])
        self.dropoutlayer = lasagne.layers.DropoutLayer(incoming=self.dropoutshape, p=p, rescale=rescale, **kwargs)

        # add parameters to this layer
        self.params.update(self.dropoutlayer.params)

        self.dropoutmask = self.add_param(init.Constant(1), 
                                          self.dropoutshape, 'dropoutmask',
                                          trainable=False, regularizable=False)
Project: baal    Author: braingineer    | Project source | File source
def make_b(self, size, name):
        P = create_param(Constant(0.), (size, ), name=name)
        self.parameters[name] = P
        return P

    ######################
    ### layer management
    ######################
Project: kaggle-quora-solution-8th    Author: qqgeogor    | Project source | File source
def __init__(self, incoming, num_units,
                 ingate=Gate(),
                 forgetgate=Gate(),
                 cell=Gate(W_cell=None, nonlinearity=nonlinearities.tanh),
                 outgate=Gate(),
                 nonlinearity=nonlinearities.tanh,
                 cell_init=init.Constant(0.),
                 hid_init=init.Constant(0.),
                 backwards=False,
                 learn_init=False,
                 peepholes=True,
                 gradient_steps=-1,
                 grad_clipping=0,
                 unroll_scan=False,
                 precompute_input=True,
                 mask_input=None,
                 only_return_final=False,
                 inter_drop=0.05,
                 **kwargs):
        super(DropLSTMLayer, self).__init__(incoming, num_units,
                                            ingate, forgetgate, cell, outgate,
                                            nonlinearity, cell_init, hid_init,
                                            backwards, learn_init, peepholes,
                                            gradient_steps, grad_clipping, unroll_scan,
                                            precompute_input, mask_input, only_return_final, **kwargs)
        self.inter_retain_prob = 1 - inter_drop
        self._srng = RandomStreams(
            lasagne.random.get_rng().randint(1, 2147462579))
Project: VAESSL    Author: lovecambi    | Project source | File source
def __init__(self, incoming, axes='auto', epsilon=1e-6, alpha=1e-2,
                 beta=init.Constant(0), gamma=init.Constant(1),
                 mean=init.Constant(0), std=init.Constant(1), **kwargs):
        super(MyBatchNormLayer, self).__init__(incoming, **kwargs)

        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes

        self.epsilon = epsilon
        self.alpha = alpha

        # create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=True)
        self.mean = self.add_param(mean, shape, 'mean',
                                   trainable=False, regularizable=False)
        self.std = self.add_param(std, shape, 'std',
                                  trainable=False, regularizable=False)
Project: aed-by-cnn    Author: tweihaha    | Project source | File source
def __init__(self, incoming, filter_size,
                 init_std=5., W_logstd=None,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        if W_logstd is None:
            init_std = np.asarray(init_std, dtype=floatX)
            W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False)
        self.W = self.make_gaussian_filter()
Project: aed-by-cnn    Author: tweihaha    | Project source | File source
def __init__(self, incoming, filter_size, init_std=5.,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(FixedGaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False,
                                       trainable=False)
        self.W = self.make_gaussian_filter()
Project: rllab    Author: rll    | Project source | File source
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: time_lstm    Author: DarryO    | Project source | File source
def __init__(self, W_t=init.Normal(0.1), W_x=init.Normal(0.1),
            b=init.Constant(0.),
            nonlinearity_inside=nonlinearities.tanh,
            nonlinearity_outside=nonlinearities.sigmoid):
        self.W_t = W_t
        self.W_x = W_x
        self.b = b
        self.nonlinearity_inside = nonlinearity_inside
        self.nonlinearity_outside = nonlinearity_outside
Project: time_lstm    Author: DarryO    | Project source | File source
def __init__(self,
                 Period=init.Uniform((10, 100)),
                 Shift=init.Uniform((0., 1000.)),
                 On_End=init.Constant(0.05)):
        self.Period = Period
        self.Shift = Shift
        self.On_End = On_End
Project: maml_rl    Author: cbfinn    | Project source | File source
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: MEM_DGM    Author: zhenxuan00    | Project source | File source
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: MEM_DGM    Author: zhenxuan00    | Project source | File source
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(NormalizedAttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: MEM_DGM    Author: zhenxuan00    | Project source | File source
def __init__(self, incoming, n_slots, C=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
        super(AttentionLayer, self).__init__(incoming, **kwargs)
        self.n_slots = n_slots
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: MEM_DGM    Author: zhenxuan00    | Project source | File source
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if u_shp[-1] != z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: 2WayNet    Author: aviveise    | Project source | File source
def __init__(self, incoming,
                 gamma=init.Uniform([0.95, 1.05]),
                 beta=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 epsilon=0.001,
                 **kwargs):
        super(BatchNormalizationLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_units = int(numpy.prod(self.input_shape[1:]))
        self.gamma = self.add_param(gamma, (self.num_units,), name="BatchNormalizationLayer:gamma", regularizable=True,
                                    gamma=True, trainable=True)
        self.beta = self.add_param(beta, (self.num_units,), name="BatchNormalizationLayer:beta", regularizable=False)
        self.epsilon = epsilon

        self.mean_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.mean_inference.name = "shared:mean"

        self.variance_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.variance_inference.name = "shared:variance"
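
A hedged usage sketch (input size is illustrative): gamma is drawn uniformly from [0.95, 1.05] while beta starts at Constant(0.); the inference mean and variance are plain shared variables rather than layer parameters.

from lasagne.layers import InputLayer

l_in = InputLayer((None, 100))
l_bn = BatchNormalizationLayer(l_in)  # gamma: shape (100,), beta: shape (100,)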