Python theano.tensor module: prod() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.prod().
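
Before turning to the project examples, here is a minimal sketch of what theano.tensor.prod() computes (our own illustration, not taken from any of the projects below): the product of tensor elements, optionally along a given axis.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
row_prod = T.prod(x, axis=1)   # one product per row
total_prod = T.prod(x)         # product over all elements
f = theano.function([x], [row_prod, total_prod])

vals = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]], dtype=theano.config.floatX)
rows, total = f(vals)          # rows == [6., 120.], total == 720.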

Project: geomdn | Author: afshinrahimi
def nll_loss_sharedparams(self, mus, sigmas, corxy, pis, y_true):
        mus_ex = mus[np.newaxis, :, :]
        X = y_true[:, np.newaxis, :]
        diff = X - mus_ex
        diffprod = T.prod(diff, axis=-1)
        corxy2 = corxy **2
        diff2 = diff ** 2
        sigmas2 = sigmas ** 2
        sigmainvs = 1.0 / sigmas
        sigmainvprods = sigmainvs[:, 0] * sigmainvs[:, 1]
        diffsigma = diff2 / sigmas2
        diffsigmanorm = T.sum(diffsigma, axis=-1)
        z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
        oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
        expterm = -0.5 * z * oneminuscorxy2inv
        new_exponent = T.log(0.5 / np.pi) + T.log(sigmainvprods) + T.log(np.sqrt(oneminuscorxy2inv)) + expterm + T.log(pis)
        max_exponent = T.max(new_exponent, axis=1, keepdims=True)
        mod_exponent = new_exponent - max_exponent
        gauss_mix = T.sum(T.exp(mod_exponent), axis=1)
        log_gauss = max_exponent + T.log(gauss_mix)
        loss = -T.mean(log_gauss)
        return loss
Project: lemontree | Author: khshim
def __init__(self, input_shape, output_shape):
        """
        This function initializes the class.
        Input is 4D tensor, output is 2D tensor.

        Parameters
        ----------
        input_shape: tuple
            a tuple of three values, i.e., (input channel, input width, input height).
        output_shape: tuple
            a tuple of a single value, i.e., (input channel,) or (input dim,).
        """
        super(Flatten3DLayer, self).__init__()
        # check asserts

        assert isinstance(input_shape, tuple) and len(input_shape) == 3, '"input_shape" should be a tuple with three values.'
        assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple with single value.'
        assert np.prod(input_shape) == output_shape[0], 'Flatten result is 2D tensor of (batch size, input channel * input width * input height).'

        # set members
        self.input_shape = input_shape
        self.output_shape = output_shape
Project: lemontree | Author: khshim
def __init__(self, input_shape, output_shape):
        """
        This function initializes the class.

        Parameters
        ----------
        input_shape: tuple
            a tuple of input shape.
        output_shape: tuple
            a tuple of output shape.
        """
        super(ReshapeLayer, self).__init__()
        # check asserts
        assert isinstance(input_shape, tuple), '"input_shape" should be a tuple.'
        assert isinstance(output_shape, tuple), '"output_shape" should be a tuple.'
        assert np.prod(input_shape) == np.prod(output_shape), 'Reshape size should be equal.'

        # set members
        self.input_shape = input_shape
        self.output_shape = output_shape
Project: bigan | Author: jeffdonahue
def get_output(self, h, nout=None, stddev=None,
                   reparameterize=reparam, exp_reparam=exp_reparam):
        h, h_shape, h_max = h.value, h.shape, h.index_max
        nin = np.prod(h_shape[1:], dtype=np.int) if (h_max is None) else h_max
        out_shape_specified = isinstance(nout, tuple)
        if out_shape_specified:
            out_shape = nout
        else:
            assert isinstance(nout, int)
            out_shape = nout,
        nout = np.prod(out_shape)
        nin_axis = [0]
        W = self.weights((nin, nout), stddev=stddev,
            reparameterize=reparameterize, nin_axis=nin_axis,
            exp_reparam=exp_reparam)
        if h_max is None:
            if h.ndim > 2:
                h = T.flatten(h, 2)
            out = T.dot(h, W)
        else:
            assert nin >= 1, 'FC: h.index_max must be >= 1; was: %s' % (nin,)
            assert h.ndim == 1
            out = W[h]
        return Output(out)
Project: deepGP_approxEP | Author: thangbui
def compute_psi2_theano(self, lls, lsf, xmean, xvar, z):
        ls2 = T.exp(2.0*lls)
        sf2 = T.exp(2.0*lsf)
        ls2p2xvar = ls2 + xvar + xvar
        constterm1 = ls2 / ls2p2xvar
        constterm2 = T.prod(T.sqrt(constterm1))
        z1mz2 = z[:, None, :] - z[None, :, :]
        a = -z1mz2**2
        b = (4.0*ls2)
        expo1 = a / b
        z1pz2 = z[:, None, :] + z[None, :, :]
        z1pz2mx = z1pz2 - xmean - xmean
        expo2 = -z1pz2mx**2.0 / (4.0*ls2p2xvar)
        expoterm = T.exp((expo1 + expo2).sum(2))
        psi2 = sf2[0]**2.0 * constterm2 * expoterm
        return psi2
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_flatten_lift():
    for i in xrange(1, 4):
        x = tensor.tensor4()
        out = tensor.flatten(T.exp(x), i)
        assert out.ndim == i
        mode = compile.mode.get_default_mode()
        mode = mode.including('local_reshape_lift')
        f = theano.function([x], out, mode=mode)
        x_np = numpy.random.rand(5, 4, 3, 2).astype(config.floatX)
        out_np = f(x_np)
        topo = f.maker.fgraph.toposort()
        shape_out_np = tuple(x_np.shape[:i-1])+(numpy.prod(x_np.shape[i-1:]),)
        assert shape_out_np == out_np.shape

        reshape_nodes = [n for n in topo if isinstance(n.op, tensor.Reshape)]
        assert (len(reshape_nodes) == 1 and
            tensor.is_flat(reshape_nodes[0].outputs[0], outdim=i))
        assert isinstance(topo[-1].op, tensor.Elemwise)
Project: dsb3 | Author: EliasVansteenkiste
def get_output_for(self, input, **kwargs):
        # `apply_nl` comes from the enclosing scope; fall back to the raw input
        # so `ps` is always defined when `apply_nl` is False.
        ps = nonlinearities.sigmoid(input) if apply_nl else input
        prod = T.prod(ps, axis=(1, 2))
        output = 1 - prod
        return output
Project: nn-patterns | Author: pikinder
def get_conv_xy_all(layer, deterministic=True):
    w_np = layer.W.get_value()
    w = layer.W
    if layer.flip_filters:
        w = w[:, :, ::-1, ::-1]

    input_layer = layer.input_layer
    if layer.pad == 'same':
        input_layer = L.PadLayer(layer.input_layer,
                                 width=np.array(w_np.shape[2:])//2,
                                 batch_ndim=2)
    input_shape = L.get_output_shape(input_layer)
    output_shape = L.get_output_shape(layer)
    max_x = input_shape[2] - w_np.shape[2]+1
    max_y = input_shape[3] - w_np.shape[3]+1
    #print("input_shape shape: ", input_shape)
    #print("output_shape shape: ", output_shape,np.prod(output_shape[2:]))
    #print("pad: \"%s\""%layer.pad)
    #print(" stride: " ,layer.stride)
    #print("max_x %d max_y %d"%(max_x,max_y))
    x_orig = L.get_output(input_layer, deterministic=True)

    x = theano.tensor.nnet.neighbours.images2neibs(x_orig,
                                                   neib_shape=layer.filter_size,
                                                   neib_step=layer.stride,
                                                   mode='valid')
    x = T.reshape(x, (x_orig.shape[0], -1,
                      np.prod(output_shape[2:]), np.prod(w_np.shape[2:])))
    x = T.transpose(x, (0, 2, 1, 3))
    x = T.reshape(x, (-1, T.prod(x.shape[2:])))

    w = T.flatten(w, outdim=2).T  # D,O
    y = T.dot(x, w) # N,O
    if layer.b is not None:
        y += T.shape_padaxis(layer.b, axis=0)
    return x, y
Project: geomdn | Author: afshinrahimi
def nll_loss_sharedparams(self, mus, sigmas, corxy, pis, y_true):
        """
        negative log likelihood loss of a 2d y_true coordinate in
        each of the Gaussians with parameters mus, sigmas, corxy, pis.
        Note that the mus, sigmas and corxy are shared between all samples
        and only pis are different for each sample.

        The formula for the negative log likelihood is:
        \mathcal{L}(y \vert x) = - \log\bigg\{\sum_{k=1}^K \pi_k(x)  \mathcal{N}\big(y \vert \mu_k(x), \Sigma_k(x)\big)\bigg\}

        The size of pis is n_batch x n_components,
        the size of mus is n_components x 2,
        the size of sigmas is n_components x 2 and
        the size of corxy is n_components x 1.

        The size of y_true is batch_size x 2.
        """
        mus_ex = mus[np.newaxis, :, :]
        X = y_true[:, np.newaxis, :]
        diff = X - mus_ex
        diffprod = T.prod(diff, axis=-1)
        corxy2 = corxy ** 2
        diff2 = diff ** 2
        sigmas2 = sigmas ** 2
        sigmainvs = 1.0 / sigmas
        sigmainvprods = sigmainvs[:, 0] * sigmainvs[:, 1]
        diffsigma = diff2 / sigmas2
        diffsigmanorm = T.sum(diffsigma, axis=-1)
        z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
        oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
        expterm = -0.5 * z * oneminuscorxy2inv
        #apply logsumExp trick for numerical stability https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
        new_exponent = T.log(0.5 / np.pi) + T.log(sigmainvprods) + T.log(np.sqrt(oneminuscorxy2inv)) + expterm + T.log(pis)
        max_exponent = T.max(new_exponent, axis=1, keepdims=True)
        mod_exponent = new_exponent - max_exponent
        gauss_mix = T.sum(T.exp(mod_exponent), axis=1)
        log_gauss = max_exponent + T.log(gauss_mix)
        loss = -T.mean(log_gauss)
        return loss
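
For reference, a minimal NumPy illustration of the log-sum-exp trick applied above (our own sketch, not from the geomdn source): log(sum_k exp(e_k)) = m + log(sum_k exp(e_k - m)) with m = max_k e_k, which keeps the exp() arguments in a safe range.

import numpy as np

exponents = np.array([-1050.0, -1052.0, -1055.0])
m = exponents.max()
stable = m + np.log(np.sum(np.exp(exponents - m)))  # ~ -1049.87
naive = np.log(np.sum(np.exp(exponents)))           # -inf: exp() underflows to 0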
Project: geomdn | Author: afshinrahimi
def pred_sharedparams(self, mus, sigmas, corxy, pis, prediction_method='mixture'):
        """
        Given a mixture of Gaussians, infer the mu that maximizes the mixture.
        There are two modes:
        If prediction_method == 'mixture', predict the mu that maximizes
        \mathcal{P}(\boldsymbol{x}) = \sum_{k=1}^{K} \pi_k \mathcal{N}(\boldsymbol{x} \vert \boldsymbol{\mu_k}, \Sigma_k)

        If prediction_method == 'pi', return the mu that has the largest pi.
        """

        if prediction_method == 'mixture':
            X = mus[:, np.newaxis, :]
            diff = X - mus
            diffprod = np.prod(diff, axis=-1)
            sigmainvs = 1.0 / sigmas
            sigmainvprods = sigmainvs[:, 0] * sigmainvs[:, 1]
            sigmas2 = sigmas ** 2
            corxy2 = corxy ** 2
            diff2 = diff ** 2
            diffsigma = diff2 / sigmas2
            diffsigmanorm = np.sum(diffsigma, axis=-1)
            z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
            oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
            term = -0.5 * z * oneminuscorxy2inv
            expterm = np.exp(term)
            probs = (0.5 / np.pi) * sigmainvprods * np.sqrt(oneminuscorxy2inv) * expterm
            piprobs = pis[:, np.newaxis, :] * probs
            piprobsum = np.sum(piprobs, axis=-1)
            preds = np.argmax(piprobsum, axis=1)
            selected_mus = mus[preds, :]
            return selected_mus

        elif prediction_method == 'pi':
            logging.info('only pis are used for prediction')
            preds = np.argmax(pis, axis=1)
            selected_mus = mus[preds, :]
            #selected_sigmas = sigmas[np.arange(sigmas.shape[0]), :, preds]
            #selected_corxy = corxy[np.arange(corxy.shape[0]),preds]
            #selected_pis = pis[np.arange(pis.shape[0]),preds]        
            return selected_mus
Project: keras | Author: GeekLiB
def count_params(x):
    '''Returns the number of scalars in a tensor.

    Return: numpy integer.
    '''
    return np.prod(x.shape.eval())
Project: keras | Author: GeekLiB
def prod(x, axis=None, keepdims=False):
    '''Multiply the values in a tensor along the specified axis.
    '''
    return T.prod(x, axis=axis, keepdims=keepdims)
Project: keras | Author: GeekLiB
def batch_flatten(x):
    '''Turn an n-D tensor into a 2D tensor where
    the first dimension is conserved.
    '''
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
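
As a quick sanity check (our own sketch, not part of the Keras source), the reshape above keeps the batch dimension and collapses all remaining dimensions:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
y = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))  # as in batch_flatten
f = theano.function([x], y.shape)
print(f(np.zeros((8, 3, 32, 32), dtype=theano.config.floatX)))  # [   8 3072]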
Project: third_person_im | Author: bstadie
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_p = old_dist_info_vars["p"]
        new_p = new_dist_info_vars["p"]
        return TT.prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                       axis=-1)
Project: rllabplusplus | Author: shaneshixiang
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_p = old_dist_info_vars["p"]
        new_p = new_dist_info_vars["p"]
        return TT.prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                       axis=-1)
Project: keraflow | Author: ipod825
def prod(self, x, axis=None, keepdims=False):
        '''Multiply the values in a tensor along the specified axis.
        '''
        return T.prod(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects | Author: jasmeetsb
def count_params(x):
    """Returns the number of scalars in a tensor.

    Return: numpy integer.
    """
    return np.prod(x.shape.eval())
Project: deep-learning-keras-projects | Author: jasmeetsb
def prod(x, axis=None, keepdims=False):
    """Multiply the values in a tensor, alongside the specified axis.
    """
    return T.prod(x, axis=axis, keepdims=keepdims)
Project: deep-learning-keras-projects | Author: jasmeetsb
def batch_flatten(x):
    """Turn a n-D tensor into a 2D tensor where
    the first dimension is conserved.
    """
    # TODO: `keras_shape` inference.
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
Project: lemontree | Author: khshim
def get_output(self, input_):
        """
        This function overrides the parent's one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        return T.reshape(input_, (input_.shape[0], T.prod(input_.shape[1:])))
Project: deeplift | Author: kundajelab
def flatten_keeping_first(x):
    """
        Flatten all but the first dimension
    """
    return T.reshape(x, (x.shape[0], T.prod(x.shape[1:])))
Project: keras-customized | Author: ambrite
def count_params(x):
    '''Returns the number of scalars in a tensor.

    Return: numpy integer.
    '''
    return np.prod(x.shape.eval())
Project: keras-customized | Author: ambrite
def prod(x, axis=None, keepdims=False):
    '''Multiply the values in a tensor along the specified axis.
    '''
    return T.prod(x, axis=axis, keepdims=keepdims)
Project: keras-customized | Author: ambrite
def batch_flatten(x):
    '''Turn an n-D tensor into a 2D tensor where
    the first dimension is conserved.
    '''
    # TODO: `keras_shape` inference.
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
Project: reading-text-in-the-wild | Author: mathDR
def count_params(x):
    '''Return number of scalars in a tensor.

    Return: numpy integer.
    '''
    return np.prod(x.shape.eval())
Project: reading-text-in-the-wild | Author: mathDR
def prod(x, axis=None, keepdims=False):
    '''Multiply the values in a tensor along the specified axis.
    '''
    return T.prod(x, axis=axis, keepdims=keepdims)
Project: reading-text-in-the-wild | Author: mathDR
def batch_flatten(x):
    '''Turn an n-D tensor into a 2D tensor where
    the first dimension is conserved.
    '''
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
Project: bigan | Author: jeffdonahue
def get_output(self, h, W):
        h, h_shape, h_max = h.value, h.shape, h.index_max
        nin = np.prod(h_shape[1:], dtype=np.int) if (h_max is None) else h_max
        assert nin == W.shape[0]
        W = W.value
        if h_max is None:
            if h.ndim > 2:
                h = T.flatten(h, 2)
            out = T.dot(h, W)
        else:
            assert nin >= 1, 'FC: h.index_max must be >= 1; was: %s' % (nin,)
            assert h.ndim == 1
            out = W[h]
        return Output(out)
Project: bigan | Author: jeffdonahue
def deconvnet_28(h, N=None, nout=3, size=None, bn_flat=True,
                 nonlin='ReLU', bnkwargs=kwargs28, bn_use_ave=False,
                 **ignored_kwargs):
    cond = h
    if N is None: N = Net()
    nonlin = getattr(N, nonlin)
    if size is None: size = 64
    def bn(h):
        return batch_norm(N, h, use_ave=bn_use_ave, **bnkwargs)
    def acts(h, ksize=1):
        h = apply_cond(N, h, cond=cond, ksize=ksize)
        h = bn(h)
        h = nonlin(h)
        return h
    h = nonlin(bn(multifc(N, h, nout=1024)))
    shape = size*2, 7, 7
    if bn_flat:
        # Batch normalize, then reshape to image.
        # (Each individual pixel of reshaped image is treated as a separate
        # channel in batch norm. This is what was done in the original code.)
        h = acts(N.FC(h, nout=np.prod(shape)))
        # recompute channel_dim in case it was altered by acts
        channel_dim = np.prod(h.shape[1:]) // np.prod(shape[1:])
        assert channel_dim * np.prod(shape[1:]) == np.prod(h.shape[1:])
        shape = (channel_dim, ) + shape[1:]
        h = N.Reshape(h, shape=((-1, ) + shape))
    else:
        h = acts(N.FC(h, nout=shape))
    h = acts(N.Deconv(h, nout=size*1, ksize=5, stride=2), ksize=5)
    h =      N.Deconv(h, nout=  nout, ksize=5, stride=2)
    h = N.Sigmoid(h) # generate images in [0, 1] range
    return h, N
Project: tree_rnn | Author: ofirnachum
def create_recursive_unit(self):
        def unit(parent_x, child_h, child_exists):  # assumes emb_dim == hidden_dim
            return parent_x + T.prod((child_h - 1) * child_exists.dimshuffle(0, 'x') + 1,
                                     axis=0)
        return unit
Project: Theano-NN_Starter | Author: nightinwhite
def get_output(self):
        return T.reshape(self.input, (self.input.shape[0], T.prod(self.input.shape) // self.input.shape[0]))
Project: Theano-NN_Starter | Author: nightinwhite
def get_output_shape(self):
        input_shape = self.input_shape
        if not all(input_shape[1:]):
            raise Exception('The shape of the input to "Flatten" '
                            'is not fully defined '
                            '(got ' + str(input_shape[1:]) + '. '
                            'Make sure to pass a complete "input_shape" '
                            'or "batch_input_shape" argument to the first '
                            'layer in your model.')
        return (input_shape[0], np.prod(input_shape[1:]))
Project: gogh-figure | Author: joelmoniz
def batched_gram5d(self, fmap):
        # (layer, batch, featuremaps, height*width)
        fmap = fmap.flatten(ndim=4)

        # (layer*batch, featuremaps, height*width)
        fmap2 = fmap.reshape((-1, fmap.shape[-2], fmap.shape[-1]))

        # The T.prod term can't be taken outside as a T.mean in style_loss(), since the width and height of the image might vary
        return T.batched_dot(fmap2, fmap2.dimshuffle(0, 2, 1)).reshape(fmap.shape) / T.prod(fmap.shape[-2:])
Project: gogh-figure | Author: joelmoniz
def batched_gram(self, fmap):
        # (batch, featuremaps, height*width)
        fmap = fmap.flatten(ndim=3)

        # The T.prod term can't be taken outside as a T.mean in style_loss(), since the width and height of the image might vary
        if self.net_type == 0:
            return T.batched_dot(fmap, fmap.dimshuffle(0, 2, 1)) / T.prod(fmap.shape[-2:])
        elif self.net_type == 1:
            return T.batched_dot(fmap, fmap.dimshuffle(0, 2, 1)) / T.prod(fmap.shape[-1])
Project: gail-driver | Author: sisl
def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_p = old_dist_info_vars["p"]
        new_p = new_dist_info_vars["p"]
        return TT.prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                       axis=-1)
Project: deepGP_approxEP | Author: thangbui
def compute_psi1_numpy(self, lls, lsf, xmean, xvar, z):
        ls2 = np.exp(2.0*lls)
        sf2 = np.exp(2.0*lsf)
        ls2pxvar = ls2 + xvar
        constterm1 = ls2 / ls2pxvar
        constterm2 = np.prod(np.sqrt(constterm1))
        r2_psi1 = ((xmean - z[None, :, :])**2.0 / ls2pxvar)\
            .sum(2)
        psi1 = sf2*constterm2*np.exp(-0.5*r2_psi1)
        return psi1
Project: deepGP_approxEP | Author: thangbui
def compute_psi2_numpy(self, lls, lsf, xmean, xvar, z):
        ls2 = np.exp(2.0*lls)
        sf2 = np.exp(2.0*lsf)
        ls2p2xvar = ls2 + 2.0*xvar
        constterm1 = ls2 / ls2p2xvar
        constterm2 = np.prod(np.sqrt(constterm1))
        z1mz2 = z[:, None, :] - z[None, :, :]
        expo1 = -z1mz2**2 / (4.0*ls2)
        z1pz2 = z[:, None, :] + z[None, :, :]
        z1pz2mx = z1pz2 - 2.0*xmean
        expo2 = -z1pz2mx**2.0 / (4.0*ls2p2xvar)
        expoterm = np.exp((expo1 + expo2).sum(2))
        psi2 = sf2**2.0 * constterm2 * expoterm
        return psi2
Project: deepGP_approxEP | Author: thangbui
def compute_psi1_theano(self, lls, lsf, xmean, xvar, z):
        ls2 = T.exp(2.0*lls)
        sf2 = T.exp(2.0*lsf)
        ls2pxvar = ls2 + xvar
        constterm1 = ls2 / ls2pxvar
        constterm2 = T.prod(T.sqrt(constterm1))
        r2_psi1 = ((xmean - z[None, :, :])**2.0 / ls2pxvar).sum(2)
        psi1 = sf2[0]*constterm2*T.exp(-0.5*r2_psi1)
        return psi1
Project: keras | Author: NVIDIA
def count_params(x):
    """Returns the number of scalars in a tensor.

    Return: numpy integer.
    """
    return np.prod(x.shape.eval())
Project: keras | Author: NVIDIA
def prod(x, axis=None, keepdims=False):
    """Multiply the values in a tensor, alongside the specified axis.
    """
    return T.prod(x, axis=axis, keepdims=keepdims)
Project: keras | Author: NVIDIA
def batch_flatten(x):
    """Turn a n-D tensor into a 2D tensor where
    the first dimension is conserved.
    """
    # TODO: `keras_shape` inference.
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
Project: keras_superpixel_pooling | Author: parag2489
def count_params(x):
    """Returns the number of scalars in a tensor.

    Return: numpy integer.
    """
    # We don't want these compilations to show up in the Theano profiler.
    f = theano.function([], x.shape, profile=False)
    return np.prod(f())
Project: keras_superpixel_pooling | Author: parag2489
def prod(x, axis=None, keepdims=False):
    """Multiply the values in a tensor, alongside the specified axis.
    """
    return T.prod(x, axis=axis, keepdims=keepdims)
Project: keras_superpixel_pooling | Author: parag2489
def flatten(x):
    y = T.flatten(x)
    if hasattr(x, '_keras_shape'):
        if None in x._keras_shape:
            y._keras_shape = (None,)
        else:
            y._keras_shape = (np.prod(x._keras_shape), )
    return y
Project: keras_superpixel_pooling | Author: parag2489
def batch_flatten(x):
    """Turn a n-D tensor into a 2D tensor where
    the first dimension is conserved.
    """
    y = T.reshape(x, (x.shape[0], T.prod(x.shape[1:])))
    if hasattr(x, '_keras_shape'):
        if None in x._keras_shape[1:]:
            y._keras_shape = (x._keras_shape[0], None)
        else:
            y._keras_shape = (x._keras_shape[0], np.prod(x._keras_shape[1:]))
    return y
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_sum_prod_all_to_none(self):
        a = T.tensor3()
        input = numpy.arange(3 * 4 * 5, dtype=config.floatX).reshape(3, 4, 5)
        # test sum
        f = theano.function([a], a.sum(), mode=self.mode)
        assert len(f.maker.fgraph.apply_nodes) == 1
        utt.assert_allclose(f(input), input.sum())
        # test prod
        f = theano.function([a], a.prod(), mode=self.mode)
        assert len(f.maker.fgraph.apply_nodes) == 1
        utt.assert_allclose(f(input), input.prod())
        # test sum
        f = theano.function([a], a.sum([0, 1, 2]), mode=self.mode)
        assert len(f.maker.fgraph.apply_nodes) == 1
        utt.assert_allclose(f(input), input.sum())
        # test prod
        f = theano.function([a], a.prod([0, 1, 2]), mode=self.mode)
        assert len(f.maker.fgraph.apply_nodes) == 1
        utt.assert_allclose(f(input), input.prod())

        backup = config.warn.sum_sum_bug
        config.warn.sum_sum_bug = False
        try:
            f = theano.function([a], a.sum(0).sum(0).sum(0), mode=self.mode)
            assert len(f.maker.fgraph.apply_nodes) == 1
            utt.assert_allclose(f(input), input.sum())
        finally:
            config.warn.sum_sum_bug = backup
Project: Theano-Deep-learning | Author: GeekLiB
def test_prod_upcast(self):
        s = theano.tensor.lscalar()
        a = theano.tensor.alloc(numpy.asarray(5, dtype='float32'), s, s)
        orig = theano.config.warn_float64
        theano.config.warn_float64 = "raise"
        try:
            f = theano.function([s], a.prod())
            f(5)
        finally:
            theano.config.warn_float64 = orig
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_reduce_broadcast_all_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x)], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_reduce_broadcast_all_1(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True))()
            f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_reduce_broadcast_some_1(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x, axis=[0, 2])], mode=self.mode)
            assert not any([
                isinstance(node.op, T.CAReduce)
                for node in f.maker.fgraph.toposort()])