Python lasagne.nonlinearities module: identity() code examples

The following 32 code examples, extracted from open-source Python projects, illustrate how to use lasagne.nonlinearities.identity().
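A note before the examples: in Lasagne, nonlinearities.identity is simply the function that returns its argument unchanged, and almost every snippet below uses the same defensive idiom: when a caller passes nonlinearity=None, the layer substitutes identity so that self.nonlinearity(x) is always callable. A minimal sketch of the idiom (the layer class here is hypothetical):

from lasagne import nonlinearities

# identity(x) just returns x, so applying it is a no-op
assert nonlinearities.identity(42) == 42

class MyLayer(object):  # hypothetical layer, for illustration only
    def __init__(self, nonlinearity=nonlinearities.rectify):
        # the recurring idiom: None means "no nonlinearity", i.e. identity
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)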

Project: NeuroNLP    Author: XuezheMax
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
Project: NeuroNLP    Author: XuezheMax
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
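The constructor above only registers parameters. A get_output_for consistent with the standard highway formulation (y = t * h + (1 - t) * x, with a sigmoid transform gate) would look roughly like the sketch below; this is an assumption based on the parameter names, not the project's actual forward pass, and it ignores the b_h/b_t-is-None cases:

import theano.tensor as T

def get_output_for(self, input, **kwargs):
    if input.ndim > 2:
        input = input.flatten(2)                                # (batch, num_inputs)
    h = self.nonlinearity(T.dot(input, self.W_h) + self.b_h)   # transform path
    t = T.nnet.sigmoid(T.dot(input, self.W_t) + self.b_t)      # transform gate
    return t * h + (1 - t) * input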
Project: MachineComprehension    Author: sa-j
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(CustomDense, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = self.input_shape[-1]

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)
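Passing nonlinearity=None makes CustomDense a purely linear projection over the last input axis. A hedged usage example (shapes are illustrative):

from lasagne.layers import InputLayer

l_in = InputLayer((None, 20, 100))    # batch x time x features
l_proj = CustomDense(l_in, num_units=50, nonlinearity=None)
# l_proj.nonlinearity is now nonlinearities.identity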
Project: LasagneNLP    Author: XuezheMax
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
Project: LasagneNLP    Author: XuezheMax
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
Project: MEM_DGM    Author: thu-ml
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
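The parameter names suggest soft addressing: C scores the input against n_slots slots and M holds the slot contents. One plausible reading of how these fit together, written as a NumPy sketch and stated purely as an assumption, not the project's actual forward pass:

import numpy as np

def memory_read(x, C, M, b):
    # assumed scheme: softmax over slot scores, then a weighted sum of slots
    scores = x.dot(C) + (b if b is not None else 0.)  # (batch, n_slots)
    p = np.exp(scores - scores.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)                 # softmax
    return p.dot(M)                                   # (batch, d_slots)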
Project: MEM_DGM    Author: thu-ml
def __init__(self, x_pre, h_m, nonlinearity_final=nonlinearities.identity, **kwargs):

        super(SimpleCompositionLayer, self).__init__([x_pre, h_m], **kwargs)

        self.nonlinearity_final = nonlinearity_final

        #self.num_units = num_units

        #num_inputs = int(np.prod(self.input_shapes[0][1:]))

        #self.W = self.add_param(W, (num_inputs, num_units), name="W")
        #if b is None:
        #    self.b = None
        #else:
        #    self.b = self.add_param(b, (num_units,), name="b",
        #                            regularizable=False)
Project: MEM_DGM    Author: thu-ml
def __init__(self, x_pre, h_m, nonlinearity_final=nonlinearities.identity, **kwargs):

        super(SimpleCompositionLayer, self).__init__([x_pre, h_m], **kwargs)

        self.nonlinearity_final = nonlinearity_final

        #self.num_units = num_units

        #num_inputs = int(np.prod(self.input_shapes[0][1:]))

        #self.W = self.add_param(W, (num_inputs, num_units), name="W")
        #if b is None:
        #    self.b = None
        #else:
        #    self.b = self.add_param(b, (num_units,), name="b",
        #                            regularizable=False)
Project: time_lstm    Author: DarryO
def __init__(self, W_in=init.Normal(0.1), W_hid=init.Normal(0.1),
                 W_cell=init.Normal(0.1), W_to=init.Normal(0.1),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.sigmoid):
        self.W_in = W_in
        self.W_hid = W_hid
        self.W_to = W_to
        # Don't store a cell weight vector when cell is None
        if W_cell is not None:
            self.W_cell = W_cell
        self.b = b
        # For the nonlinearity, if None is supplied, use identity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
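This class mirrors lasagne.layers.Gate: it only bundles initializers and a nonlinearity, and the recurrent layer that consumes it creates the actual shared variables. For comparison, the stock Lasagne Gate is used the same way, and it too falls back to identity when nonlinearity=None:

from lasagne import init
from lasagne.layers import InputLayer, LSTMLayer, Gate

l_in = InputLayer((None, 20, 100))
# a gate built with nonlinearity=None applies identity, i.e. a linear gate
l_lstm = LSTMLayer(l_in, num_units=64,
                   ingate=Gate(W_in=init.Normal(0.1), nonlinearity=None))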
Project: MEM_DGM    Author: zhenxuan00
def __init__(self, incoming, n_slots, d_slots, C=init.GlorotUniform(), M=init.Normal(),
                 b=init.Constant(0.), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots), name="C") # controller
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots,), name="b",
                                    regularizable=False)
Project: MEM_DGM    Author: zhenxuan00
def __init__(self, x_pre, h_m, nonlinearity_final=nonlinearities.identity, **kwargs):

        super(SimpleCompositionLayer, self).__init__([x_pre, h_m], **kwargs)

        self.nonlinearity_final = nonlinearity_final

        #self.num_units = num_units

        #num_inputs = int(np.prod(self.input_shapes[0][1:]))

        #self.W = self.add_param(W, (num_inputs, num_units), name="W")
        #if b is None:
        #    self.b = None
        #else:
        #    self.b = self.add_param(b, (num_units,), name="b",
        #                            regularizable=False)
Project: MEM_DGM    Author: zhenxuan00
def __init__(self, x_pre, h_m, nonlinearity_final=nonlinearities.identity, **kwargs):

        super(SimpleCompositionLayer, self).__init__([x_pre, h_m], **kwargs)

        self.nonlinearity_final = nonlinearity_final

        #self.num_units = num_units

        #num_inputs = int(np.prod(self.input_shapes[0][1:]))

        #self.W = self.add_param(W, (num_inputs, num_units), name="W")
        #if b is None:
        #    self.b = None
        #else:
        #    self.b = self.add_param(b, (num_units,), name="b",
        #                            regularizable=False)
Project: dsb3    Author: EliasVansteenkiste
def __init__(self, incoming, nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(NonlinearityLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)
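With the fallback in place, the forward pass reduces to applying the stored function, as in Lasagne's stock NonlinearityLayer; with identity it is a pass-through:

def get_output_for(self, input, **kwargs):
    return self.nonlinearity(input)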
Project: third_person_im    Author: bstadie
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
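The parameter names pin down a standard GRU cell. A step function consistent with them (a sketch, not the project's exact code; conventions differ on whether the reset gate multiplies the hidden state before or after the dot product):

import theano.tensor as T

def gru_step(self, x, hprev):
    r = self.gate_nonlinearity(T.dot(x, self.W_xr) + T.dot(hprev, self.W_hr) + self.b_r)
    u = self.gate_nonlinearity(T.dot(x, self.W_xu) + T.dot(hprev, self.W_hu) + self.b_u)
    c = self.nonlinearity(T.dot(x, self.W_xc) + r * T.dot(hprev, self.W_hc) + self.b_c)
    return (1 - u) * hprev + u * c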
Project: rllabplusplus    Author: shaneshixiang
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: MEM_DGM    Author: thu-ml
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if not u_shp[-1] == z_shp[-1]:
            raise ValueError("last dimension of u and z  must be equal"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: MEM_DGM    Author: thu-ml
def __init__(self, incoming, n_slots, d_slots, M=init.Normal(), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(SeparateMemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
Project: DynamicMemoryNetworks    Author: swstarlab
def __init__(self, W_in=Normal(0.1), W_hid=Normal(0.1),
                 b=Constant(0.), nonlinearity=nonlin.sigmoid):
        self.W_in  = W_in
        self.W_hid = W_hid
        self.b     = b
        if nonlinearity is None:
            self.nonlinearity = nonlin.identity
        else:
            self.nonlinearity = nonlinearity
Project: drmad    Author: bigaidream-projects
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 num_leading_axes=1, **kwargs):
        super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        if num_leading_axes >= len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "leaving no trailing axes for the dot product." %
                    (num_leading_axes, len(self.input_shape)))
        elif num_leading_axes < -len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "requesting more trailing axes than there are input "
                    "dimensions." % (num_leading_axes, len(self.input_shape)))
        self.num_leading_axes = num_leading_axes

        if any(s is None for s in self.input_shape[num_leading_axes:]):
            raise ValueError(
                    "A DenseLayer requires a fixed input shape (except for "
                    "the leading axes). Got %r for num_leading_axes=%d." %
                    (self.input_shape, self.num_leading_axes))
        num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)

        if args.regL1 is True:
            self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                     (num_inputs, num_units), name="L1")
        if args.regL2 is True:
            self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                     (num_inputs, num_units), name="L2")
Project: CIKM2017    Author: MovieFIB
def __init__(self, W_g=init.Normal(0.1), W_s=init.Normal(0.1),
                 W_h=init.Normal(0.1), W_v=init.Normal(0.1),
                 nonlinearity=nonlinearities.softmax):
        self.W_s = W_s
        self.W_h = W_h
        self.W_g = W_g
        self.W_v = W_v
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
Project: gelato    Author: ferrine
def test_workflow(self):
        inp = InputLayer(self.x.shape)
        out = DenseLayer(inp, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
        out = DenseLayer(out, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
        assert out.root is inp
        with out:
            pm.Normal('y', mu=get_output(out),
                      sd=self.sd,
                      observed=self.y)
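With nonlinearity=to.identity both DenseLayers are linear maps, so this test exercises gelato's Bayesian plumbing (NormalSpec priors, the layer used as a PyMC3 model context) without any activation function getting in the way.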
Project: aed-by-cnn    Author: tweihaha
def __init__(self, incoming, filter_size,
                 init_std=5., W_logstd=None,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        if W_logstd is None:
            init_std = np.asarray(init_std, dtype=floatX)
            W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False)
        self.W = self.make_gaussian_filter()
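make_gaussian_filter is not shown here. The idea it implements is building a smoothing kernel whose width is controlled by the learnable log-std; a NumPy sketch of that construction, offered as an assumption about the layer's internals:

import numpy as np

def gaussian_filter(logstd, filter_size):
    # one normalized Gaussian kernel per channel, width = exp(logstd)
    std = np.exp(logstd)                          # (num_channels,)
    k = np.arange(filter_size) - (filter_size - 1) / 2.0
    w = np.exp(-0.5 * (k[None, :] / std[:, None]) ** 2)
    return w / w.sum(axis=1, keepdims=True)       # each kernel sums to 1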
Project: aed-by-cnn    Author: tweihaha
def __init__(self, incoming, filter_size, init_std=5.,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(FixedGaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False,
                                       trainable=False)
        self.W = self.make_gaussian_filter()
Project: rllab    Author: rll
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: maml_rl    Author: cbfinn
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: MEM_DGM    Author: zhenxuan00
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if not u_shp[-1] == z_shp[-1]:
            raise ValueError("last dimension of u and z  must be equal"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: MEM_DGM    Author: zhenxuan00
def __init__(self, incoming, n_slots, d_slots, M=init.Normal(), nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(SeparateMemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots
        self.M = self.add_param(M, (n_slots, d_slots), name="M") # memory slots
Project: 2WayNet    Author: aviveise
def __init__(self, incoming,
                 gamma=init.Uniform([0.95, 1.05]),
                 beta=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 epsilon=0.001,
                 **kwargs):
        super(BatchNormalizationLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_units = int(numpy.prod(self.input_shape[1:]))
        self.gamma = self.add_param(gamma, (self.num_units,), name="BatchNormalizationLayer:gamma", regularizable=True,
                                    gamma=True, trainable=True)
        self.beta = self.add_param(beta, (self.num_units,), name="BatchNormalizationLayer:beta", regularizable=False)
        self.epsilon = epsilon

        self.mean_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.mean_inference.name = "shared:mean"

        self.variance_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.variance_inference.name = "shared:variance"
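gamma/beta together with the shared inference statistics imply the usual batch-norm transform. At inference time the output would be computed roughly as below (a sketch assuming the input is already flattened to (batch, num_units)):

import theano.tensor as T

def bn_inference(self, x):
    x_hat = (x - self.mean_inference) / T.sqrt(self.variance_inference
                                               + self.epsilon)
    return self.nonlinearity(self.gamma * x_hat + self.beta)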
Project: 2WayNet    Author: aviveise
def __init__(self, incoming, num_units, cell_num, W=lasagne.init.GlorotUniform(),
                 b=lasagne.init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 name=None, **kwargs):
        super(LocallyDenseLayer, self).__init__(incoming, name)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = int(np.prod(self.input_shape[1:]))
        self.cell_input_size = num_inputs // cell_num   # integer division (the Python 2 original used /)
        self.cell_size = self.num_units // cell_num

        if isinstance(W, lasagne.init.Initializer):
            W = [W for i in range(0, cell_num)]

        if isinstance(b, lasagne.init.Initializer):
            b = [b for i in range(0, cell_num)]

        self._dense_layers = []
        self.W = []
        self.b = []

        # Creating m number of tied dense layers
        for i in range(cell_num):
            self._dense_layers.append(TiedDenseLayer(CutLayer(incoming, cell_num),
                                                     self.cell_size, W[i], b[i], nonlinearity, **kwargs))

            self.W.append(self._dense_layers[-1].W)
            self.b.append(self._dense_layers[-1].b)
Project: experiments    Author: tencia
def batch_norm(layer, **kwargs):
    """
    Apply batch normalization to an existing layer. This is a convenience
    function modifying an existing layer to include batch normalization: It
    will steal the layer's nonlinearity if there is one (effectively
    introducing the normalization right before the nonlinearity), remove
    the layer's bias if there is one (because it would be redundant), and add
    a :class:`BatchNormLayer` and :class:`NonlinearityLayer` on top.
    Parameters
    ----------
    layer : A :class:`Layer` instance
        The layer to apply the normalization to; note that it will be
        irreversibly modified as specified above
    **kwargs
        Any additional keyword arguments are passed on to the
        :class:`BatchNormLayer` constructor.
    Returns
    -------
    BatchNormLayer or NonlinearityLayer instance
        A batch normalization layer stacked on the given modified `layer`, or
        a nonlinearity layer stacked on top of both if `layer` was nonlinear.
    Examples
    --------
    Just wrap any layer into a :func:`batch_norm` call on creating it:
    >>> from lasagne.layers import InputLayer, DenseLayer, batch_norm
    >>> from lasagne.nonlinearities import tanh
    >>> l1 = InputLayer((64, 768))
    >>> l2 = batch_norm(DenseLayer(l1, num_units=500, nonlinearity=tanh))
    This introduces batch normalization right before its nonlinearity:
    >>> from lasagne.layers import get_all_layers
    >>> [l.__class__.__name__ for l in get_all_layers(l2)]
    ['InputLayer', 'DenseLayer', 'BatchNormLayer', 'NonlinearityLayer']
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = nonlinearities.identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormLayer(layer, **kwargs)
    if nonlinearity is not None:
        from lasagne.layers import NonlinearityLayer
        layer = NonlinearityLayer(layer, nonlinearity)
    return layer
Project: drmad    Author: bigaidream-projects
def __init__(self, args, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DLayerWithReg, self).__init__(incoming, **kwargs)

        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = _as_tuple(filter_size, 2)
        self.stride = _as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution

        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = _as_tuple(pad, 2, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)

        if args.regL1 is True:
            self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                     self.get_W_shape() , name="L1")
        if args.regL2 is True:
            self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                     self.get_W_shape() , name="L2")
Project: aed-by-cnn    Author: tweihaha
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DXLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution

        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'strictsamex':
            if not (stride == 1 or stride == (1, 1)):
                raise NotImplementedError(
                    '`strictsamex` padding requires stride=(1, 1) or 1')

        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same', 'strictsamex'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)