Python theano.tensor.nnet module: sigmoid() example source code

The following 32 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.nnet.sigmoid().
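
For orientation, here is a minimal, self-contained sketch (our own, not taken from any of the projects below): sigmoid() maps a symbolic tensor elementwise through the logistic function 1 / (1 + exp(-x)).

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import sigmoid

x = T.matrix('x')             # symbolic input
y = sigmoid(x)                # elementwise logistic nonlinearity
f = theano.function([x], y)   # compile the symbolic graph into a callable

print(f(np.array([[0.0, 2.0]], dtype=theano.config.floatX)))
# -> approximately [[0.5, 0.881]]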

Project: python-machine-learning    Author: sho-87    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_in), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
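
The excerpt above shows only the constructor of a fully connected layer (the class name is not shown; FullyConnectedLayer is assumed below, as in Michael Nielsen's network3.py, which this code appears to derive from). A hedged forward-pass sketch consistent with the (n_in, n_out) weight shape:

# Sketch only: the excerpt omits the forward pass, so this is an assumption.
x = T.matrix('x')                                  # (mini_batch, n_in)
layer = FullyConnectedLayer(n_in=784, n_out=30)    # class name assumed
out = layer.activation_fn(T.dot(x, layer.w) + layer.b)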
Project: machine-deep_learning    Author: Charleswyt    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
Project: reinforcement-learning-policy-gradients    Author: DarkElement75    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
Project: reinforcement-learning-policy-gradients    Author: DarkElement75    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
Project: reinforcement-learning-policy-gradients    Author: DarkElement75    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
Project: neural-networks-and-deep-learning    Author: skylook    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
Project: deep-motion-analysis    Author: Brimborough    | project source | file source
def __call__(self, input):

        b_size = input.shape[0]

        # Floor division: the batch is split into two equal halves
        input_data = input[:(b_size // 2)]
        initial = input[(b_size // 2):]

        input_data = input_data.dimshuffle(2, 0, 1)
        initial = initial.dimshuffle(2, 0, 1)

        me = self.dropout(T.ones_like(input_data[0]))
        mh = self.dropout(T.ones_like(self.encoder(input_data[0])))   

        def step(e, h, me, mh):
            ig = sigmoid(self.encode_igate(me * e) + self.recode_igate(mh * h))
            fg = sigmoid(self.encode_fgate(me * e) + self.recode_fgate(mh * h))
            return self.activation(fg * self.recoder(mh * h) + ig * self.encoder(me * e))

        h = theano.scan(step, sequences=[input_data, initial], non_sequences=[me, mh], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Project: deep-motion-analysis    Author: Brimborough    | project source | file source
def __call__(self, input):

        b_size = input.shape[0]

        # Floor division: the batch is split into two equal halves
        input_data = input[:(b_size // 2)]
        initial = input[(b_size // 2):]

        input_data = input_data.dimshuffle(2, 0, 1)
        initial = initial.dimshuffle(2, 0, 1)

        me = self.dropout1(T.ones_like(input_data[0]))
        mh = self.dropout2(T.ones_like(self.encoder(input_data[0])))   

        def step(e, h, me, mh):
            ig = sigmoid(self.encode_igate(me * e) + self.recode_igate(mh * h))
            fg = sigmoid(self.encode_fgate(me * e) + self.recode_fgate(mh * h))
            return self.activation(fg * self.recoder(mh * h) + ig * self.encoder(me * e))

        h = theano.scan(step, sequences=[input_data, initial], non_sequences=[me, mh], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Project: deep-motion-analysis    Author: Brimborough    | project source | file source
def __call__(self, input):

        b_size = input.shape[0]

        # Floor division: the batch is split into two equal halves
        input_data = input[:(b_size // 2)]
        initial = input[(b_size // 2):]

        input_data = input_data.dimshuffle(2, 0, 1)
        initial = initial.dimshuffle(2, 0, 1)

        me = self.dropout(T.ones_like(input_data[0]))
        mh = self.dropout(T.ones_like(self.encoder(input_data[0])))   

        def step(e, h, me, mh):
            ig = sigmoid(self.encode_igate(me * e) + self.recode_igate(mh * h))
            fg = sigmoid(self.encode_fgate(me * e) + self.recode_fgate(mh * h))
            return self.activation(fg * self.recoder(mh * h) + ig * self.encoder(me * e))

        h = theano.scan(step, sequences=[input_data, initial], non_sequences=[me, mh], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Project: deep-murasaki    Author: lazydroid    | project source | file source
def get_update(Ws_s, bs_s):
    x, fx = train.get_model(Ws_s, bs_s)

    # Ground truth (who won)
    y = T.vector('y')

    # Compute loss (just log likelihood of a sigmoid fit)
    y_pred = sigmoid(fx)
    loss = -(y * T.log(y_pred) + (1 - y) * T.log(1 - y_pred)).mean()

    # Metrics on the number of correctly predicted ones
    frac_correct = ((fx > 0) * y + (fx < 0) * (1 - y)).mean()

    # Updates
    learning_rate_s = T.scalar(dtype=theano.config.floatX)
    momentum_s = T.scalar(dtype=theano.config.floatX)
    updates = train.nesterov_updates(loss, Ws_s + bs_s, learning_rate_s, momentum_s)

    f_update = theano.function(
        inputs=[x, y, learning_rate_s, momentum_s],
        outputs=[loss, frac_correct],
        updates=updates,
        )

    return f_update
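
The hand-rolled loss above is the mean binary cross-entropy of a sigmoid fit. Theano also ships this as T.nnet.binary_crossentropy(output, target); a sketch (ours, not part of the project) showing the two forms coincide:

fx = T.vector('fx')                    # raw scores
y = T.vector('y')                      # 0/1 ground truth
y_pred = sigmoid(fx)
manual = -(y * T.log(y_pred) + (1 - y) * T.log(1 - y_pred)).mean()
builtin = T.nnet.binary_crossentropy(y_pred, y).mean()
check = theano.function([fx, y], [manual, builtin])   # both outputs agree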
Project: DeepLearningPython35    Author: MichalDanielDobrzanski    | project source | file source
def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
Project: 3D-R2N2    Author: chrischoy    | project source | file source
def set_output(self):
        self._output = sigmoid(self._prev_layer.output)
Project: python-machine-learning    Author: sho-87    | project source | file source
def __init__(self, filter_shape, image_shape, border_mode='half',
                 stride=(1, 1), activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and
        the filter width.

        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.border_mode = border_mode
        self.stride = stride
        self.activation_fn = activation_fn

        # initialize weights and biases
        n_in = np.prod(filter_shape[1:])  # Total number of input params
        # n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_in), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
Project: python-machine-learning    Author: sho-87    | project source | file source
def __init__(self, filter_shape, image_shape, border_mode='half',
                 stride=(1, 1), poolsize=(2, 2), activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and
        the filter width.

        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.

        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.border_mode = border_mode
        self.stride = stride
        self.poolsize = poolsize
        self.activation_fn = activation_fn

        # initialize weights and biases
        n_in = np.prod(filter_shape[1:])  # Total number of input params
        # n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_in), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
Project: machine-deep_learning    Author: Charleswyt    | project source | file source
def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and the
        filter width.

        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.

        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
Project: NNProject_DeepMask    Author: abbypa    | project source | file source
def layer(x, w):
    b = np.array([1], dtype=theano.config.floatX)
    new_x = T.concatenate([x, b])
    m = T.dot(w.T, new_x)
    h = nnet.sigmoid(m)
    return h
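
A usage sketch (ours) for the helper above: because a constant 1 is concatenated onto x, the weight matrix needs one extra row that plays the role of the bias.

n_in, n_out = 3, 2
x = T.vector('x')                              # length n_in
w = theano.shared(np.zeros((n_in + 1, n_out),  # last row multiplies the bias unit
                           dtype=theano.config.floatX), name='w')
h = layer(x, w)                                # sigmoid(w.T . [x; 1]), length n_out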
Project: neural-networks-and-deep-learning    Author: skylook    | project source | file source
def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and the
        filter width.

        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.

        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
Project: deep-motion-analysis    Author: Brimborough    | project source | file source
def __call__(self, input):

        input = input.dimshuffle(2, 0, 1)
        initial = self.initial.dimshuffle(2, 0, 1)

        def step(e, h):
            ig = sigmoid(self.encode_igate(e) + self.recode_igate(h))
            fg = sigmoid(self.encode_fgate(e) + self.recode_fgate(h))
            return self.activation(fg * self.recoder(h) + ig * self.encoder(e))

        h = theano.scan(step, sequences=[input, initial], outputs_info=None)[0]

        return h.dimshuffle(1, 2, 0)
Project: nature_methods_multicut_pipeline    Author: ilastik    | project source | file source
def sigmoid(gain=1, spread=1, mode='hard'):  # Possible modes: 'fast', 'full', 'hard'.
    if mode == 'fast':
        return lambda x: gain * nnet.ultra_fast_sigmoid(spread * x)
    elif mode == 'full':
        return lambda x: gain * nnet.sigmoid(spread * x)
    elif mode == 'hard':
        return lambda x: gain * nnet.hard_sigmoid(spread * x)
    else:
        return lambda x: gain * nnet.ultra_fast_sigmoid(spread * x)


# Linear (duh)
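
A usage sketch (ours): the factory returns a closure, so an activation is configured once and then applied to symbolic tensors; 'full' selects the exact nnet.sigmoid, while 'fast' and 'hard' trade accuracy for speed.

act = sigmoid(gain=1, spread=2, mode='full')   # exact logistic, steepened by 2
x = T.matrix('x')
y = act(x)                                     # gain * nnet.sigmoid(spread * x)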
Project: nature_methods_multicut_pipeline    Author: ilastik    | project source | file source
def he(shape, gain=1., dtype=th.config.floatX):
    if len(shape) == 4:
        # 2D convolutions
        fmapsin = shape[1]
        fov = np.prod(shape[2:])
    elif len(shape) == 5:
        # 3D convolutions
        fmapsin = shape[2]
        fov = np.prod(shape[3:]) * shape[1]
    else:
        raise NotImplementedError

    # Parse gain
    if isinstance(gain, str):
        if gain.lower() == 'relu':
            gain = 2.
        elif gain.lower() in ['sigmoid', 'linear', 'tanh']:
            gain = 1.

    # Compute variance for He init
    var = gain/(fmapsin * fov)
    # Build kernel
    ker = np.random.normal(loc=0., scale=np.sqrt(var), size=tuple(shape)).astype(dtype)
    return ker


# Training Monitors
# Batch Number
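
A usage sketch (ours) for the He initializer above, building a 2D convolution kernel for a ReLU layer and wrapping it in a shared variable:

# (num_filters, input_fmaps, kernel_h, kernel_w) -> the len(shape) == 4 branch
ker = he((32, 3, 3, 3), gain='relu')           # variance 2 / (fmapsin * fov)
W = th.shared(ker, name='W', borrow=True)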
Project: nature_methods_multicut_pipeline    Author: ilastik    | project source | file source
def sigmoid(gain=1, spread=1, mode='hard'):  # Possible modes: 'fast', 'full', 'hard'.
    if mode == 'fast':
        return lambda x: gain * nnet.ultra_fast_sigmoid(spread * x)
    elif mode == 'full':
        return lambda x: gain * nnet.sigmoid(spread * x)
    elif mode == 'hard':
        return lambda x: gain * nnet.hard_sigmoid(spread * x)
    else:
        return lambda x: gain * nnet.ultra_fast_sigmoid(spread * x)


# Linear (duh)
Project: nature_methods_multicut_pipeline    Author: ilastik    | project source | file source
def he(shape, gain=1., dtype=th.config.floatX):
    if len(shape) == 4:
        # 2D convolutions
        fmapsin = shape[1]
        fov = np.prod(shape[2:])
    elif len(shape) == 5:
        # 3D convolutions
        fmapsin = shape[2]
        fov = np.prod(shape[3:]) * shape[1]
    else:
        raise NotImplementedError

    # Parse gain
    if isinstance(gain, str):
        if gain.lower() == 'relu':
            gain = 2.
        elif gain.lower() in ['sigmoid', 'linear', 'tanh']:
            gain = 1.

    # Compute variance for He init
    var = gain/(fmapsin * fov)
    # Build kernel
    ker = np.random.normal(loc=0., scale=np.sqrt(var), size=tuple(shape)).astype(dtype)
    return ker


# Training Monitors
# Batch Number
# Loss
# Cost
# Training Error
# Validation Error
# Gradient Norm Monitor
# Monitor to check if the model is exploring previously unexplored parameter space. Returns n, where n is the number of
# dimensions in which the model has explored new param range.
# Monitor for update norm
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def build_prediction(self):
        # return NN.softmax(self.activation)  # use this line to expose a slow
        # subtensor implementation
        return NN.sigmoid(self.activation)
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def __init__(self,
                 input=tensor.dvector('input'),
                 target=tensor.dvector('target'),
                 n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
        super(NNet, self).__init__(**kw)

        self.input = input
        self.target = target
        self.lr = shared(lr, 'learning_rate')
        self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
        self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
        # print self.lr.type

        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        self.cost = tensor.sum((self.output - self.target)**2)

        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates)

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
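
A training sketch (ours) for the class above. Note that with the all-zero initialization shown, w1 only starts moving once w2 has left zero:

net = NNet(n_input=3, n_hidden=10, n_output=1, lr=1e-2)
x_val = numpy.array([0.1, -0.2, 0.3])
t_val = numpy.array([1.0])
for _ in range(100):
    out, cost = net.sgd_step(x_val, t_val)   # one SGD step; returns output, cost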
Project: weather-modelling    Author: flipdazed    | project source | file source
def propUp(self, vis):
        """This function propagates the visible units activation upwards to
        the hidden units

        Note that we return also the pre-sigmoid activation of the
        layer. As it will turn out later, due to how Theano deals with
        optimizations, this symbolic variable will be needed to write
        down a more stable computational graph (see details in the
        reconstruction cost function)
        """
        pre_sigmoid_activation = T.dot(vis, self.w) + self.hbias
        # `activation` is a module-level nonlinearity (presumably the sigmoid,
        # given the pre-sigmoid naming)
        return [pre_sigmoid_activation, activation(pre_sigmoid_activation)]
Project: weather-modelling    Author: flipdazed    | project source | file source
def sampleHgivenV(self, v0_sample):
        """ This function infers state of hidden units given visible units """
        # compute the activation of the hidden units given a sample of
        # the visibles
        pre_sigmoid_h1, h1_mean = self.propUp(v0_sample)
        # get a sample of the hiddens given their activation
        # Note that theano_rng.binomial returns a symbolic sample of dtype
        # int64 by default. If we want to keep our computations in floatX
        # for the GPU we need to specify to return the dtype floatX
        h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
                                             n=1, p=h1_mean,
                                             dtype=theano.config.floatX)
        return [pre_sigmoid_h1, h1_mean, h1_sample]
Project: weather-modelling    Author: flipdazed    | project source | file source
def propDown(self, hid):
        """This function propagates the hidden units activation downwards to
        the visible units

        Note that we return also the pre_sigmoid_activation of the
        layer. As it will turn out later, due to how Theano deals with
        optimizations, this symbolic variable will be needed to write
        down a more stable computational graph (see details in the
        reconstruction cost function)

        """
        pre_sigmoid_activation = T.dot(hid, self.w.T) + self.vbias
        return [pre_sigmoid_activation, activation(pre_sigmoid_activation)]
Project: weather-modelling    Author: flipdazed    | project source | file source
def sampleVgivenH(self, h0_sample):
        """ This function infers state of visible units given hidden units """
        # compute the activation of the visible given the hidden sample
        pre_sigmoid_v1, v1_mean = self.propDown(h0_sample)
        # get a sample of the visible given their activation
        # Note that theano_rng.binomial returns a symbolic sample of dtype
        # int64 by default. If we want to keep our computations in floatX
        # for the GPU we need to specify to return the dtype floatX
        v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
                                             n=1, p=v1_mean,
                                             dtype=theano.config.floatX)
        return [pre_sigmoid_v1, v1_mean, v1_sample]
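
sampleHgivenV and sampleVgivenH above are the two halves of an alternating Gibbs chain; a hedged sketch of the composite step as it conventionally appears in RBM implementations (the method name is ours):

def gibbsHVH(self, h0_sample):
    """One full Gibbs step starting from the hidden state: h -> v -> h.
    This is the update unrolled when building a CD-k / PCD chain."""
    pre_sigmoid_v1, v1_mean, v1_sample = self.sampleVgivenH(h0_sample)
    pre_sigmoid_h1, h1_mean, h1_sample = self.sampleHgivenV(v1_sample)
    return [pre_sigmoid_v1, v1_mean, v1_sample,
            pre_sigmoid_h1, h1_mean, h1_sample]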
Project: weather-modelling    Author: flipdazed    | project source | file source
def getPseudoLikelihoodCost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""

        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the inputs image by rounding to nearest integer
        xi = T.round(self.inputs)

        # calculate free energy for the given bit configuration
        fe_xi = self.freeEnergy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.freeEnergy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(activation(fe_xi_flip - fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
        return cost
Project: DeepLearningPython35    Author: MichalDanielDobrzanski    | project source | file source
def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and the
        filter width.

        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.

        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.

        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
Project: 3d-convolutional-network    Author: ehosseiniasl    | project source | file source
def initialize_layer(self):
        rng = np.random.RandomState(None)
        W_values = np.asarray(
            rng.uniform(
                low=-np.sqrt(6. / (self.n_in + self.n_out)),
                high=np.sqrt(6. / (self.n_in + self.n_out)),
                size=(self.n_in, self.n_out)),
            dtype=theano.config.floatX)

        if self.activation == nnet.sigmoid:
            W_values *= 4

        b_values = np.zeros((self.n_out,), dtype=theano.config.floatX)
        self.W.set_value(W_values, borrow=True)
        self.b.set_value(b_values, borrow=True)
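
The W_values *= 4 branch implements the Glorot & Bengio (2010) heuristic that sigmoid units want an initialization interval four times wider than tanh units. A standalone sketch (ours) of the same rule:

def glorot_uniform(n_in, n_out, for_sigmoid=False):
    bound = np.sqrt(6. / (n_in + n_out))   # tanh interval from Glorot & Bengio
    W = np.random.uniform(-bound, bound, (n_in, n_out))
    if for_sigmoid:
        W *= 4                             # sigmoid saturates later, so widen 4x
    return W.astype(theano.config.floatX)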
Project: 3d-convolutional-network    Author: ehosseiniasl    | project source | file source
def __init__(self, rng, input, n_in, n_out, share_with=None, activation=None):

        self.input = input
        self.n_in = n_in
        self.n_out = n_out
        self.activation = activation
        if share_with:
            self.W = share_with.W
            self.b = share_with.b

            self.W_delta = share_with.W_delta
            self.b_delta = share_with.b_delta
        else:
            W_values = np.asarray(
                rng.uniform(
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            # Accept both the string and the function object, since the output
            # branch below compares `activation` against strings
            if activation in ('sigmoid', nnet.sigmoid):
                W_values *= 4

            self.W = theano.shared(value=W_values, name='W', borrow=True)

            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            self.b = theano.shared(value=b_values, name='b', borrow=True)

            self.W_delta = theano.shared(
                    np.zeros((n_in, n_out), dtype=theano.config.floatX),
                    borrow=True
                )

            self.b_delta = theano.shared(value=b_values, borrow=True)

        self.params = [self.W, self.b]

        self.deltas = [self.W_delta, self.b_delta]

        lin_output = T.dot(self.input, self.W) + self.b

        if activation == 'tanh':
            self.output = T.tanh(lin_output)
        elif activation == 'sigmoid':
            self.output = nnet.sigmoid(lin_output)
        elif activation == 'relu':
            self.output = T.maximum(lin_output, 0)
        else:
            self.output = lin_output
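
A usage sketch (ours) for the layer above; the class name is not shown in the excerpt, so HiddenLayer is assumed:

rng = np.random.RandomState(1234)
x = T.matrix('x')
hidden = HiddenLayer(rng, input=x, n_in=784, n_out=100,
                     activation='sigmoid')   # string selects nnet.sigmoid output
f = theano.function([x], hidden.output)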