Python theano.tensor module: set_subtensor() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.set_subtensor().
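The following minimal sketch (assuming only a standard theano installation) illustrates the pattern shared by all the excerpts below: T.set_subtensor(x[idx], v) does not modify x in place; it returns a new symbolic tensor in which the selected subtensor is replaced by v.

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
# Replace the first two entries with zeros. x itself is unchanged;
# set_subtensor returns a new symbolic variable.
y = T.set_subtensor(x[:2], 0.)
f = theano.function([x], y)
print(f(np.array([1., 2., 3.], dtype=theano.config.floatX)))  # prints [0. 0. 3.]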

Project: iterative_inference_segm    Author: adri-romsor    | project source | file source
def __getitem__(self, name):
        return self.layer_dict[name]


#def _pad_to_fit(x, target_shape):
    #"""
    #Spatially pad a tensor's feature maps with zeros as evenly as possible
    #(center it) to fit the target shape.

    #Expected target shape is larger than the shape of the tensor.

    #NOTE: padding may be unequal on either side of the map if the target
    #dimension is odd. This is why keras's ZeroPadding2D isn't used.
    #"""
    #pad_0 = {}
    #pad_1 = {}
    #for dim in [2, 3]:
        #pad_0[dim] = (target_shape[dim]-x.shape[dim])//2
        #pad_1[dim] = target_shape[dim]-x.shape[dim]-pad_0[dim]
    #output = T.zeros(target_shape)
    #indices = (slice(None),
               #slice(None),
               #slice(pad_0[2], target_shape[2]-pad_1[2]),
               #slice(pad_0[3], target_shape[3]-pad_1[3]))
    #return T.set_subtensor(output[indices], x)
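Since _pad_to_fit is commented out above, here is a minimal runnable sketch of the same center-padding idea (the function name and the 4D layout are assumptions, not the project's code):

import theano.tensor as T

def pad_to_fit(x, target_shape):
    # Center x's spatial maps (dims 2 and 3) inside target_shape,
    # zero-filling the borders; assumes target_shape >= x.shape spatially.
    pad_0 = {}
    for dim in [2, 3]:
        pad_0[dim] = (target_shape[dim] - x.shape[dim]) // 2
    output = T.zeros(target_shape)
    indices = (slice(None),
               slice(None),
               slice(pad_0[2], pad_0[2] + x.shape[2]),
               slice(pad_0[3], pad_0[3] + x.shape[3]))
    return T.set_subtensor(output[indices], x)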
Project: iterative_inference_segm    Author: adri-romsor    | project source | file source
def crossentropy(y_pred, y_true, void_labels, one_hot=False):
    # Clip predictions
    y_pred = T.clip(y_pred, _EPSILON, 1.0 - _EPSILON)

    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Create mask
    mask = T.ones_like(y_true, dtype=_FLOATX)
    for el in void_labels:
        mask = T.set_subtensor(mask[T.eq(y_true, el).nonzero()], 0.)

    # Modify y_true temporarily
    y_true_tmp = y_true * mask
    y_true_tmp = y_true_tmp.astype('int32')

    # Compute cross-entropy
    loss = T.nnet.categorical_crossentropy(y_pred, y_true_tmp)

    # Compute masked mean loss
    loss *= mask
    loss = T.sum(loss) / T.sum(mask)

    return loss
Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def get_output_for(self, input, **kwargs):
        a, b, c = self.scale_factor
        upscaled = input
        if self.mode == 'repeat':
            if c > 1:
                upscaled = T.extra_ops.repeat(upscaled, c, 4)
            if b > 1:
                upscaled = T.extra_ops.repeat(upscaled, b, 3)
            if a > 1:
                upscaled = T.extra_ops.repeat(upscaled, a, 2)
        elif self.mode == 'dilate':
            if c > 1 or b > 1 or a > 1:
                output_shape = self.get_output_shape_for(input.shape)
                upscaled = T.zeros(shape=output_shape, dtype=input.dtype)
                upscaled = T.set_subtensor(
                    upscaled[:, :, ::a, ::b, ::c], input)
        return upscaled
Project: Neural-Photo-Editor    Author: ajbrock    | project source | file source
def mdclW(num_filters,num_channels,filter_size,winit,name,scales):
    # Coefficient Initializer
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))
    # Total filter size
    size = filter_size + (filter_size-1)*(scales[-1]-1)
    # Multiscale Dilated Filter 
    W = T.zeros((num_filters,num_channels,size,size))
    # Undilated Base Filter
    baseW = theano.shared(lasagne.utils.floatX(winit.sample((num_filters,num_channels,filter_size,filter_size))),name=name+'.W')
    for scale in scales[::-1]:  # iterate backwards so that we place the main filter on top
        W = T.set_subtensor(W[:, :, scales[-1]-scale:size-scales[-1]+scale:scale, scales[-1]-scale:size-scales[-1]+scale:scale],
                            baseW*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'.coeff_'+str(scale)).dimshuffle(0, 'x', 'x', 'x'))
    return W

# Subpixel Upsample Layer from (https://arxiv.org/abs/1609.05158)
# This layer uses a set of r^2 set_subtensor calls to reorganize the tensor in a subpixel-layer upscaling style,
# as done in the ESPCN paper from Magic Pony for super-resolution.
# r is the upscale factor.
# c is the number of output channels.
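The layer's implementation is not shown in this excerpt; a hedged sketch of the r^2-call reshuffle described above (the channel-to-phase mapping is an assumption) could look like:

import theano.tensor as T

def subpixel_upsample(X, r, c):
    # Rearrange (batch, c*r*r, H, W) -> (batch, c, H*r, W*r) using r**2
    # set_subtensor calls, one per output phase (i, j).
    out = T.zeros((X.shape[0], c, X.shape[2] * r, X.shape[3] * r))
    for i in range(r):
        for j in range(r):
            # Assumed mapping: channels [(i*r+j)*c : (i*r+j+1)*c] fill phase (i, j)
            out = T.set_subtensor(out[:, :, i::r, j::r],
                                  X[:, (i * r + j) * c:(i * r + j + 1) * c, :, :])
    return out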
Project: keras    Author: GeekLiB    | project source | file source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d
Project: icml17_knn    Author: taolei87    | project source | file source
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
    has_momentum = momentum.get_value() > 0.0
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            if has_momentum:
                acc_slices = get_similar_subtensor(acc, indexes, p)
                new_acc = acc_slices*momentum + g
                updates[acc] = T.set_subtensor(acc_slices, new_acc)
            else:
                new_acc = g
            updates[origin] = T.inc_subtensor(p, - lr * new_acc)
        else:
            if has_momentum:
                new_acc = acc*momentum + g
                updates[acc] = new_acc
            else:
                new_acc = g
            updates[p] = p - lr * new_acc
Project: icml17_knn    Author: taolei87    | project source | file source
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
            #updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
            # which one to use?
Project: icml17_knn    Author: taolei87    | project source | file source
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d
Project: pl-cnn    Author: oval-group    | project source | file source
def input_batch(layer):

    idx = T.iscalar()
    X = T.tensor4()

    layer_input = lasagne.layers.get_output(layer.input_layer, X,
                                            deterministic=True)
    layer_input = layer_input.flatten(2) if layer_input.ndim > layer.inp_ndim \
        else layer_input

    b_size = X.shape[0]
    X_layer = T.set_subtensor(layer.X_layer[idx, :b_size, :], layer_input)

    updates = [(layer.X_layer, X_layer)]

    return theano.function([idx, X], updates=updates)
Project: 3D-R2N2    Author: chrischoy    | project source | file source
def set_output(self):
        output_shape = self._output_shape
        padding = self._padding
        unpool_size = self._unpool_size
        unpooled_output = tensor.alloc(0.0,  # Value to fill the tensor
                                       output_shape[0],
                                       output_shape[1] + 2 * padding[0],
                                       output_shape[2],
                                       output_shape[3] + 2 * padding[1],
                                       output_shape[4] + 2 * padding[2])

        unpooled_output = tensor.set_subtensor(
            unpooled_output[:,
                            padding[0]:output_shape[1] + padding[0]:unpool_size[0],
                            :,
                            padding[1]:output_shape[3] + padding[1]:unpool_size[1],
                            padding[2]:output_shape[4] + padding[2]:unpool_size[2]],
            self._prev_layer.output)
        self._output = unpooled_output
Project: 3D-R2N2    Author: chrischoy    | project source | file source
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        if np.sum(self._padding) > 0:
            padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                        input_shape[0],
                                        input_shape[1] + 2 * padding[1],
                                        input_shape[2],
                                        input_shape[3] + 2 * padding[3],
                                        input_shape[4] + 2 * padding[4])

            padded_input = tensor.set_subtensor(
                padded_input[:,
                             padding[1]:padding[1] + input_shape[1],
                             :,
                             padding[3]:padding[3] + input_shape[3],
                             padding[4]:padding[4] + input_shape[4]],
                self._prev_layer.output)
        else:
            padded_input = self._prev_layer.output

        self._output = conv3d2d.conv3d(padded_input, self.W.val) + \
            self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Project: 3D-R2N2    Author: chrischoy    | project source | file source
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:,
                         padding[1]:padding[1] + input_shape[1],
                         :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        fc_output = tensor.reshape(
            tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)
        self._output = conv3d2d.conv3d(padded_input, self.Wh.val) + \
            fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Project: 3D-R2N2    Author: chrischoy    | project source | file source
def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:,
                         padding[1]:padding[1] + input_shape[1],
                         :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        self._output = conv3d2d.conv3d(padded_input, self.W.val) + \
            self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
Project: keraflow    Author: ipod825    | project source | file source
def padding(self, x, pad, pad_dims, output_shape):
        # x shape: (nb_sample, input_depth, rows, cols)

        output = T.zeros((x.shape[0],) + output_shape[1:])
        indices = [slice(None), slice(None)]  # nb_sample, input_depth does not change

        for i in range(2, len(output_shape)):
            if i not in pad_dims:
                indices.append(slice(None))
            else:
                p = pad[i-2]
                if isinstance(p, (tuple,list)):
                    assert len(p)==2
                    assert p[0]!=0 or p[1]!=0
                    indices.append(slice(p[0], output_shape[i] - p[1]))  # explicit stop avoids the -0 pitfall when p[1] == 0
                else:
                    if p==0:
                        indices.append(slice(None))
                    else:
                        indices.append(slice(p, -p))

        return T.set_subtensor(output[indices], x)
Project: Theano-MPI    Author: uoguelph-mlrg    | project source | file source
def __call__(self, c01b):
        """
        .. todo::
            WRITEME
        """
        half = self.n // 2

        sq = T.sqr(c01b)

        ch, r, c, b = c01b.shape

        extra_channels = T.alloc(0., ch + 2*half, r, c, b)

        sq = T.set_subtensor(extra_channels[half:half+ch,:,:,:], sq)

        scale = self.k

        for i in xrange(self.n):
            scale += self.alpha * sq[i:i+ch,:,:,:]

        scale = scale ** self.beta

        return c01b / scale
Project: Theano-MPI    Author: uoguelph-mlrg    | project source | file source
def __call__(self, c01b):
        """
        .. todo::
            WRITEME
        """
        half = self.n // 2

        sq = T.sqr(c01b)

        ch, r, c, b = c01b.shape

        extra_channels = T.alloc(0., ch + 2*half, r, c, b)

        sq = T.set_subtensor(extra_channels[half:half+ch,:,:,:], sq)

        scale = self.k

        for i in xrange(self.n):
            scale += self.alpha * sq[i:i+ch,:,:,:]

        scale = scale ** self.beta

        return c01b / scale
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: keras-neural-graph-fingerprint    Author: keiserlab    | project source | file source
def temporal_padding(x, paddings=(1, 0), padvalue=0):
    '''Pad the middle dimension of a 3D tensor
    with `padding[0]` values left and `padding[1]` values right.

    Modified from keras.backend.temporal_padding
    https://github.com/fchollet/keras/blob/3bf913d/keras/backend/theano_backend.py#L590

    TODO: Implement for tensorflow (supposedly easier)
    '''
    if not isinstance(paddings, (tuple, list, ndarray)):
        paddings = (paddings, paddings)

    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + sum(paddings),
                    input_shape[2])
    output = T.zeros(output_shape)

    # Fill the left and right pad regions with padvalue, then copy x into the middle
    output = T.set_subtensor(output[:, :paddings[0], :], padvalue)
    output = T.set_subtensor(output[:, x.shape[1] + paddings[0]:, :], padvalue)
    output = T.set_subtensor(output[:, paddings[0]:x.shape[1] + paddings[0], :], x)
    return output
Project: NADE    Author: MarcCote    | project source | file source
def nll_of_x_given_o(self, input, ordering):
        """ Returns the theano graph that computes $-ln p(\bx|o)$.

        Parameters
        ----------
        input: 1D vector
            One image with shape (nb_channels * images_height * images_width).

        ordering: 1D vector of int
            List of pixel indices representing the input ordering.
        """

        D = int(np.prod(self.image_shape))
        mask_o_d = T.zeros((D, D), dtype=theano.config.floatX)
        mask_o_d = T.set_subtensor(mask_o_d[T.arange(D), ordering], 1.)

        mask_o_lt_d = T.cumsum(mask_o_d, axis=0)
        mask_o_lt_d = T.set_subtensor(mask_o_lt_d[1:], mask_o_lt_d[:-1])
        mask_o_lt_d = T.set_subtensor(mask_o_lt_d[0, :], 0.)

        input = T.tile(input[None, :], (D, 1))
        nll = -T.sum(self.lnp_x_o_d_given_x_o_lt_d(input, mask_o_d, mask_o_lt_d))
        return nll
Project: lemontree    Author: khshim    | project source | file source
def get_output(self, input_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        shape = (input_.shape[0],
                 self.input_shape[0],
                 self.input_shape[1] + self.padding[0] + self.padding[1],
                 self.input_shape[2] + self.padding[2] + self.padding[3])
        result = T.zeros(shape, dtype=theano.config.floatX)  # make zero output
        indices = (slice(None),
                   slice(None),
                   slice(self.padding[0], self.input_shape[1] + self.padding[0]),
                   slice(self.padding[2], self.input_shape[2] + self.padding[2])
                   )
        return T.set_subtensor(result[indices], input_)  # the layer input is input_, not the builtin input



# TODO: MAKE THIS WORK!
Project: lemontree    Author: khshim    | project source | file source
def get_output(self, input_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        if self.upscale_mode == 'repeat':
            upscaled = T.extra_ops.repeat(input_, self.scale_factor[0], 2)
            upscaled = T.extra_ops.repeat(upscaled, self.scale_factor[1], 3)
        else:
            upscaled = T.zeros((input_.shape[0], input_.shape[1], \
                input_.shape[2] * self.scale_factor[0], input_.shape[3] * self.scale_factor[1]), dtype=theano.config.floatX)
            upscaled = T.set_subtensor(upscaled[:, :, ::self.scale_factor[0], ::self.scale_factor[1]], input_)

        return upscaled
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp    | project source | file source
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
    has_momentum = momentum.get_value() > 0.0
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            if has_momentum:
                acc_slices = get_similar_subtensor(acc, indexes, p)
                new_acc = acc_slices*momentum + g
                updates[acc] = T.set_subtensor(acc_slices, new_acc)
            else:
                new_acc = g
            updates[origin] = T.inc_subtensor(p, - lr * new_acc)
        else:
            if has_momentum:
                new_acc = acc*momentum + g
                updates[acc] = new_acc
            else:
                new_acc = g
            updates[p] = p - lr * new_acc
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp    | project source | file source
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
            #updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
            # which one to use?
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp    | project source | file source
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d
Project: knowledgeflow    Author: 3rduncle    | project source | file source
def semantic_matrix(argv):
    assert len(argv) == 2
    q = argv[0]
    a = argv[1]
    q_sqrt = K.sqrt((q ** 2).sum(axis=2, keepdims=True))
    a_sqrt = K.sqrt((a ** 2).sum(axis=2, keepdims=True))
    denominator = K.batch_dot(q_sqrt, K.permute_dimensions(a_sqrt, [0,2,1]))
    return K.batch_dot(q, K.permute_dimensions(a, [0,2,1])) / (denominator + SAFE_EPSILON)

# Selecting entries by idx requires pairing each per-row index with its batch index;
# see https://groups.google.com/forum/#!topic/theano-users/7gUdN6E00Dc
# The argmax axis here is 2 - axis.
# In theano, a > 0 returns an integer mask such as [1,1,0] rather than a
# bool array, so it cannot be used directly as a boolean index.
# Negative indices can be zeroed with T.set_subtensor(ib[(ib < 0).nonzero()], 0)
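As a concrete illustration of the last comment (the variable names are mine, not the project's):

import numpy as np
import theano
import theano.tensor as T

ib = T.ivector('ib')
# Clamp negative indices to zero before using them to index another tensor
clamped = T.set_subtensor(ib[(ib < 0).nonzero()], 0)
f = theano.function([ib], clamped)
print(f(np.array([2, -1, 0, -3], dtype='int32')))  # prints [2 0 0 0]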
Project: keras-customized    Author: ambrite    | project source | file source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: CNNbasedMedicalSegmentation    Author: BRML    | project source | file source
def stretch_axis(a, axis, factor, original_shape):
    new_shape = [original_shape[0], original_shape[1],
                 original_shape[2], original_shape[3],
                 original_shape[4]]
    new_shape[axis] *= factor
    out_first = T.zeros(new_shape)

    indices_first = [slice(None),] * 5
    indices_first[axis] = slice(0, new_shape[axis], factor*2)
    indices_second = [slice(None),] * 5
    indices_second[axis] = slice(factor*2-1, new_shape[axis], factor*2)

    indices_take_first = [slice(None),] * 5
    indices_take_first[axis] = slice(0, original_shape[axis], factor)
    indices_take_second = [slice(None),] * 5
    indices_take_second[axis] = slice(1, original_shape[axis], factor)

    out_second = T.set_subtensor(out_first[indices_first], a[indices_take_first])
    out = T.set_subtensor(out_second[indices_second], a[indices_take_second])

    return out
Project: mimicry.ai    Author: fizerkhan    | project source | file source
def sample_gmm(mu, sigma, weight, theano_rng):

    k = weight.shape[-1]
    dim = mu.shape[-1] // k  # integer division: k mixture components of size dim

    shape_result = weight.shape
    shape_result = tensor.set_subtensor(shape_result[-1], dim)
    ndim_result = weight.ndim

    mu = mu.reshape((-1, dim, k))
    sigma = sigma.reshape((-1, dim, k))
    weight = weight.reshape((-1, k))

    sample_weight = theano_rng.multinomial(pvals=weight, dtype=weight.dtype)
    idx = predict(sample_weight, axis=-1)

    mu = mu[tensor.arange(mu.shape[0]), :, idx]
    sigma = sigma[tensor.arange(sigma.shape[0]), :, idx]

    epsilon = theano_rng.normal(
        size=mu.shape, avg=0., std=1., dtype=mu.dtype)

    result = mu + sigma * epsilon

    return result.reshape(shape_result, ndim=ndim_result)
Project: hgru4rec    Author: mquad    | project source | file source
def adam(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
        v1 = np.float32(self.decay)
        v2 = np.float32(1.0 - self.decay)
        acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        meang = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        countt = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        if sample_idx is None:
            acc_new = v1 * acc + v2 * grad ** 2
            meang_new = v1 * meang + v2 * grad
            countt_new = countt + 1
            updates[acc] = acc_new
            updates[meang] = meang_new
            updates[countt] = countt_new
        else:
            acc_s = acc[sample_idx]
            meang_s = meang[sample_idx]
            countt_s = countt[sample_idx]
            acc_new = v1 * acc_s + v2 * grad ** 2
            meang_new = v1 * meang_s + v2 * grad
            countt_new = countt_s + 1.0
            updates[acc] = T.set_subtensor(acc_s, acc_new)
            updates[meang] = T.set_subtensor(meang_s, meang_new)
            updates[countt] = T.set_subtensor(countt_s, countt_new)
        return (meang_new / (1 - v1 ** countt_new)) / (T.sqrt(acc_new / (1 - v1 ** countt_new)) + epsilon)
Project: hgru4rec    Author: mquad    | project source | file source
def adadelta(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
        v1 = np.float32(self.decay)
        v2 = np.float32(1.0 - self.decay)
        acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        upd = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        if sample_idx is None:
            acc_new = acc + grad ** 2
            updates[acc] = acc_new
            grad = T.sqrt(upd + epsilon) * grad
            upd_new = v1 * upd + v2 * grad ** 2
            updates[upd] = upd_new
        else:
            acc_s = acc[sample_idx]
            acc_new = acc_s + grad ** 2
            updates[acc] = T.set_subtensor(acc_s, acc_new)
            upd_s = upd[sample_idx]
            upd_new = v1 * upd_s + v2 * grad ** 2
            updates[upd] = T.set_subtensor(upd_s, upd_new)
            grad = T.sqrt(upd_s + epsilon) * grad
        gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
        return grad / gradient_scaling
Project: NeuralNetLibrary    Author: SeanJia    | project source | file source
def forward(self, data, stable_version=False):
        """input has each row as data vector; output also does so"""
        count = 1
        for bias, weight, pre_w, post_w in zip(self.biases, self.weights, self.pre_w, self.post_w):
            size = pre_w[0].shape[0]
            zeros_pre_w = T.zeros((size + 4, size + 4))
            zeros_post_w = T.zeros((size + 4, size + 4))
            pre_w_padding = T.set_subtensor(zeros_pre_w[2: size + 2, 2: size + 2], pre_w[0])
            post_w_padding_T = T.set_subtensor(zeros_post_w[2: size + 2, 2: size + 2], post_w[0])
            pre, updt = scan(process_pre_post_w, sequences=[pre_w_padding, zeros_pre_w])
            post_T, updt = scan(process_pre_post_w, sequences=[post_w_padding_T, zeros_post_w])
            pre, post_T = pre[2:size + 2, :], post_T[2:size + 2, :]
            ori_shape = data.shape
            data = T.reshape(data, (ori_shape[0], pre_w[0].shape[0], pre_w[0].shape[0]))
            product, updt = scan(lambda x, A, B: T.dot(T.dot(A, x), B), sequences=data, non_sequences=[pre, post_T.T])
            data = T.reshape(product, ori_shape)
            if count < self.num_layers - 1:
                data = T.nnet.relu(T.dot(data, weight) + bias)
            elif not stable_version:
                data = T.nnet.softmax(T.dot(data, weight) + bias)
            else:
                data = log_softmax(T.dot(data, weight) + bias)
            count += 1
        return data
Project: denet    Author: lachlants    | project source | file source
def __init__(self, layers, border = 0, json_param={}):
        super().__init__(layer_index=len(layers))

        self.input = layers[-1].output
        self.input_shape = layers[-1].output_shape

        #border = (Left, Right, Top, Bottom)
        if type(border) is int:
            border = (border, border, border, border)
        elif len(border) == 1:
            border = (border[0], border[0], border[0], border[0])

        assert len(border) == 4
        self.border = json_param.get("border", border)

        self.output_shape = list(self.input_shape)
        self.output_shape[-1] += self.border[0]+self.border[1]
        self.output_shape[-2] += self.border[2]+self.border[3]
        self.output_shape = tuple(self.output_shape)

        self.output = tensor.zeros(self.output_shape)
        self.output = tensor.set_subtensor(
            self.output[:, :,
                        self.border[2]:(self.input_shape[-2] + self.border[2]),
                        self.border[0]:(self.input_shape[-1] + self.border[0])],
            self.input)

        logging.verbose("Adding", self)
Project: dl4nlp_in_theano    Author: luyaojie    | project source | file source
def temporal_padding_2d(x, padding=(1, 1)):
    """Pad the middle dimension of a 2D matrix
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    Code from https://github.com/fchollet/keras/blob/master/keras/backend/theano_backend.py
    x: (length, dim)
    """
    assert len(padding) == 2
    input_shape = x.shape
    output_shape = (input_shape[0] + padding[0] + padding[1],
                    input_shape[1])
    output = T.zeros(output_shape)
    result = T.set_subtensor(output[padding[0]:x.shape[0] + padding[0], :], x)
    return result
Project: dl4nlp_in_theano    Author: luyaojie    | project source | file source
def temporal_padding_3d(x, padding=(1, 1)):
    """Pad the middle dimension of a 3D tensor
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    Code from https://github.com/fchollet/keras/blob/master/keras/backend/theano_backend.py
    """
    assert len(padding) == 2
    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + padding[0] + padding[1],
                    input_shape[2])
    output = T.zeros(output_shape)
    result = T.set_subtensor(output[:, padding[0]:x.shape[1] + padding[0], :], x)
    return result
Project: keras    Author: NVIDIA    | project source | file source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: deep-learning-essentials    Author: DominicBreuker    | project source | file source
def call(self, x, mask=None):
        X = x
        half_n = self.n // 2
        input_sqr = K.square(X)
        if K._BACKEND == 'theano':
            b, ch, r, c = X.shape
            extra_channels = T.alloc(0., b, ch + 2*half_n, r, c)
            input_sqr = T.set_subtensor(
                extra_channels[:, half_n:half_n+ch, :, :], input_sqr)
        elif K._BACKEND == 'tensorflow':
            b, ch, r, c = K.int_shape(X)
            up_dims = tf.pack([tf.shape(X)[0], half_n, r, c])
            up = tf.fill(up_dims, 0.0)
            middle = input_sqr
            down_dims = tf.pack([tf.shape(X)[0], half_n, r, c])
            down = tf.fill(down_dims, 0.0)
            input_sqr = K.concatenate([up, middle, down], axis=1)
        scale = self.k
        norm_alpha = self.alpha / self.n
        for i in range(self.n):
            scale += norm_alpha * input_sqr[:, i:i+ch, :, :]
        scale = scale ** self.beta
        result = X / scale
        return result
Project: gymexperiments    Author: tambetm    | project source | file source
def _L(x):
    # initialize with zeros
    batch_size = x.shape[0]
    a = T.zeros((batch_size, num_actuators, num_actuators))
    # set diagonal elements
    batch_idx = T.extra_ops.repeat(T.arange(batch_size), num_actuators)
    diag_idx = T.tile(T.arange(num_actuators), batch_size)
    b = T.set_subtensor(a[batch_idx, diag_idx, diag_idx], T.flatten(T.exp(x[:, :num_actuators])))
    # set lower triangle
    cols = np.concatenate([np.array(range(i), dtype=np.uint) for i in xrange(num_actuators)])
    rows = np.concatenate([np.array([i]*i, dtype=np.uint) for i in xrange(num_actuators)])
    cols_idx = T.tile(T.as_tensor_variable(cols), batch_size)
    rows_idx = T.tile(T.as_tensor_variable(rows), batch_size)
    batch_idx = T.extra_ops.repeat(T.arange(batch_size), len(cols))
    c = T.set_subtensor(b[batch_idx, rows_idx, cols_idx], T.flatten(x[:, num_actuators:]))
    return c
Project: gymexperiments    Author: tambetm    | project source | file source
def _L(x):
    # initialize with zeros
    batch_size = x.shape[0]
    a = T.zeros((batch_size, num_actuators, num_actuators))
    # set diagonal elements
    batch_idx = T.extra_ops.repeat(T.arange(batch_size), num_actuators)
    diag_idx = T.tile(T.arange(num_actuators), batch_size)
    b = T.set_subtensor(a[batch_idx, diag_idx, diag_idx], T.flatten(T.exp(x[:, :num_actuators])))
    # set lower triangle
    cols = np.concatenate([np.array(range(i), dtype=np.uint) for i in xrange(num_actuators)])
    rows = np.concatenate([np.array([i]*i, dtype=np.uint) for i in xrange(num_actuators)])
    cols_idx = T.tile(T.as_tensor_variable(cols), batch_size)
    rows_idx = T.tile(T.as_tensor_variable(rows), batch_size)
    batch_idx = T.extra_ops.repeat(T.arange(batch_size), len(cols))
    c = T.set_subtensor(b[batch_idx, rows_idx, cols_idx], T.flatten(x[:, num_actuators:]))
    return c
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def temporal_padding(x, padding=(1, 1)):
    """Pad the middle dimension of a 3D tensor
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    """
    assert len(padding) == 2
    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + padding[0] + padding[1],
                    input_shape[2])
    output = T.zeros(output_shape)
    result = T.set_subtensor(output[:, padding[0]:x.shape[1] + padding[0], :], x)
    if hasattr(x, '_keras_shape'):
        result._keras_shape = (x._keras_shape[0],
                               x._keras_shape[1] + py_sum(padding),
                               x._keras_shape[2])
    return result
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def make_node(self, a, s=None):
        a = T.as_tensor_variable(a)
        if a.ndim < 3:
            raise TypeError('%s: input must have dimension >= 3,  with ' %
                            self.__class__.__name__ +
                            'first dimension batches and last real/imag parts')

        if s is None:
            s = a.shape[1:-1]
            s = T.set_subtensor(s[-1], (s[-1] - 1) * 2)
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            if (not s.dtype.startswith('int')) and \
               (not s.dtype.startswith('uint')):
                raise TypeError('%s: length of the transformed axis must be'
                                ' of type integer' % self.__class__.__name__)
        return gof.Apply(self, [a, s], [self.output_type(a)()])
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def test_assert(self):
            x = tensor.matrix("x")
            y = tensor.matrix("y")
            idx = tensor.ivector()

            dx = numpy.random.rand(4, 5).astype(config.floatX)
            dy = numpy.random.rand(2, 5).astype(config.floatX)
            didx = numpy.asarray([1, 3], "int32")

            # set_subtensor
            inc = tensor.set_subtensor(x[idx], y)
            o = inc[idx]
            f = theano.function([x, y, idx], o, self.mode)
            # test wrong index
            for i in [dx.shape[0], -dx.shape[0] - 1]:
                self.assertRaises((AssertionError, IndexError),
                                  f, dx, dy, [i, i])
            # test wrong shape
            self.assertRaises((AssertionError, ValueError),
                              f, dx, dy, [1])
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def test_stack_trace(self):
        x = tensor.matrix("x")
        # test cases with y.dtype
        # - equal to x.dtype
        # - different from x.dtype (to trigger the cast in
        #   local_adv_sub1_adv_inc_sub1)
        ys = [tensor.matrix("y"), tensor.dmatrix("y")]
        idx = tensor.ivector()

        # set_subtensor and then subtensor with both ys
        incs = [tensor.set_subtensor(x[idx], y) for y in ys]
        outs = [inc[idx] for inc in incs]

        for y, out in zip(ys, outs):
            f = theano.function([x, y, idx], out, self.mode)
            self.assertTrue(check_stack_trace(
                f, ops_to_check=(Assert, scal.Cast)))
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def test_grad_2d_inc_set_subtensor(self):
        for n_shape, m_shape in [
            [(2, 3), (2, 2)],
            [(3, 2), (2, 2)],
            [(3, 2), (1, 2)],
            [(3, 2), (2,)],
        ]:
            for op in [inc_subtensor, set_subtensor]:
                subi = 2
                data = numpy.asarray(rand(*n_shape), dtype=self.dtype)
                n = self.shared(data)
                z = scal.constant(subi)
                m = matrix('m', dtype=self.dtype)
                mv = numpy.asarray(rand(*m_shape), dtype=self.dtype)

                t = op(n[:z, :z], m)
                gn, gm = theano.tensor.grad(theano.tensor.sum(t), [n, m])
                utt.verify_grad(lambda m: op(n[:z, :z], m), [mv])
                utt.verify_grad(lambda nn: op(nn[:z, :z], mv), [data])
Project: sampleRNN_ICLR2017    Author: soroushmehr    | project source | file source
def T_one_hot(inp_tensor, n_classes):
    """
    :todo:
        - Implement other methods from here: 
        - Compare them speed-wise for different sizes
        - Implement N_one_hot for Numpy version, with speed tests.

    Theano one-hot (1-of-k) from an input tensor of indices.
    If the indices are of the shape (a0, a1, ..., an) the output
    shape would be (a0, a1, ..., an, n_classes).

    :params:
        - inp_tensor: any theano tensor with dtype int* as indices and all of
                      them between [0, n_classes-1].
        - n_classes: number of classes which determines the output size.

    :usage:
        >>> idx = T.itensor3()
        >>> idx_val = numpy.array([[[0,1,2,3],[4,5,6,7]]], dtype='int32')
        >>> one_hot = T_one_hot(idx, 8)
        >>> out = one_hot.eval({idx: idx_val})
        >>> print out
        array([[[[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.]],
        [[ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.]]]])
        >>> print idx_val.shape, out.shape
        (1, 2, 4) (1, 2, 4, 8)
    """
    flattened = inp_tensor.flatten()
    z = T.zeros((flattened.shape[0], n_classes), dtype=theano.config.floatX)
    one_hot = T.set_subtensor(z[T.arange(flattened.shape[0]), flattened], 1)
    out_shape = [inp_tensor.shape[i] for i in xrange(inp_tensor.ndim)] + [n_classes]
    one_hot = one_hot.reshape(out_shape)
    return one_hot
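
For the numpy version mentioned in the docstring's TODO, indexing an identity matrix is one minimal sketch (N_one_hot here is hypothetical, not the project's code):

import numpy as np

def N_one_hot(inp, n_classes):
    # Rows of the identity matrix are one-hot vectors; fancy indexing
    # appends an n_classes axis to inp's shape.
    return np.eye(n_classes, dtype='float32')[inp]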
Project: iterative_inference_segm    Author: adri-romsor    | project source | file source
def get_output_for(self, upscaled, **kwargs):
        a, b = self.scale_factor
        # get output for pooling and pre-pooling layer
        inp, out =\
                lasagne.layers.get_output([self.pool2d_layer_in,
                                           self.pool2d_layer])
        # upscale the input feature map by scale_factor
        if b > 1:
            upscaled = T.extra_ops.repeat(upscaled, b, 3)
        if a > 1:
            upscaled = T.extra_ops.repeat(upscaled, a, 2)
        # get the shapes for pre-pooling layer and upscaled layer
        sh_pool2d_in = T.shape(inp)
        sh_upscaled = T.shape(upscaled)
        # in case the shape is different left-bottom-pad with zero
        tmp = T.zeros(sh_pool2d_in)

        indx = (slice(None),
                slice(None),
                slice(0, sh_upscaled[2]),
                slice(0, sh_upscaled[3]))
        upscaled = T.set_subtensor(tmp[indx], upscaled)
        # get max pool indices
        indices_pool = T.grad(None, wrt=inp,
                known_grads={out: T.ones_like(out)})
        # mask values using indices_pool
        f = indices_pool * upscaled
        return f
Project: iterative_inference_segm    Author: adri-romsor    | project source | file source
def jaccard(y_pred, y_true, n_classes, one_hot=False):

    assert (y_pred.ndim == 2) or (y_pred.ndim == 1)

    # y_pred to indices
    if y_pred.ndim == 2:
        y_pred = T.argmax(y_pred, axis=1)

    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Compute confusion matrix
    cm = T.zeros((n_classes, n_classes))
    for i in range(n_classes):
        for j in range(n_classes):
            cm = T.set_subtensor(
                cm[i, j], T.sum(T.eq(y_pred, i) * T.eq(y_true, j)))

    # Compute Jaccard Index
    TP_perclass = T.cast(cm.diagonal(), _FLOATX)
    FP_perclass = cm.sum(1) - TP_perclass
    FN_perclass = cm.sum(0) - TP_perclass

    num = TP_perclass
    denom = TP_perclass + FP_perclass + FN_perclass

    return T.stack([num, denom], axis=0)
Project: iterative_inference_segm    Author: adri-romsor    | project source | file source
def accuracy(y_pred, y_true, void_labels, one_hot=False):

    assert (y_pred.ndim == 2) or (y_pred.ndim == 1)

    # y_pred to indices
    if y_pred.ndim == 2:
        y_pred = T.argmax(y_pred, axis=1)

    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Compute accuracy
    acc = T.eq(y_pred, y_true).astype(_FLOATX)

    # Create mask (zero out void labels, as in crossentropy above; a
    # Python-level any() check on symbolic indices is not meaningful)
    mask = T.ones_like(y_true, dtype=_FLOATX)
    for el in void_labels:
        indices = T.eq(y_true, el).nonzero()
        mask = T.set_subtensor(mask[indices], 0.)

    # Apply mask
    acc *= mask
    acc = T.sum(acc) / T.sum(mask)

    return acc
Project: structured-output-ae    Author: sbelharbi    | project source | file source
def localResponseNormalizationCrossChannel(incoming, alpha=1e-4,
                                           k=2, beta=0.75, n=5):
    """
    Implement the local response normalization cross the channels described
    in <ImageNet Classification with Deep Convolutional Neural Networks>,
    A.Krizhevsky et al. sec.3.3.
    Reference of the code:
    https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/
    normalization.py
    https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/expr/normalize.py
    Parameters:
    incoming: The feature maps (output of the convolution layer).
    alpha: float scalar
    k: float scalar
    beta: float scalar
    n: integer: number of adjacent channels. Must be odd.
    """
    if n % 2 == 0:
        raise NotImplementedError("Works only with odd n")

    input_shape = incoming.shape
    half_n = n // 2
    input_sqr = T.sqr(incoming)
    b, ch, r, c = input_shape
    extra_channels = T.alloc(0., b, ch + 2*half_n, r, c)
    input_sqr = T.set_subtensor(extra_channels[:, half_n:half_n+ch, :, :],
                                input_sqr)
    scale = k
    for i in range(n):
        scale += alpha * input_sqr[:, i:i+ch, :, :]
    scale = scale ** beta

    return incoming / scale