Python theano.tensor module: sgn() example source code

The following 31 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.sgn().
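As a quick orientation before the project examples: T.sgn computes the elementwise sign of a tensor, returning -1, 0, or 1. A minimal self-contained sketch:

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
sign_fn = theano.function([x], T.sgn(x))    # elementwise sign
print(sign_fn(np.array([-2.5, 0.0, 3.0])))  # -> [-1.  0.  1.]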

Project: iaf    Author: openai    | project source | file source
def discretized_laplace(mean, logscale, binsize, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
        sample = T.floor(sample/binsize)*binsize #discretize the sample

    d = .5*binsize
    def cdf(x):
        z = x-mean
        return .5 + .5 * T.sgn(z) * (1.-T.exp(-abs(z)/scale))
    def logmass1(x):
        # General method for probability mass, but numerically unstable for large |x-mean|/scale
        return T.log(cdf(x+d) - cdf(x-d) + 1e-7)
    def logmass2(x):
        # Only valid for |x-mean| >= d
        return -abs(x-mean)/scale + T.log(T.exp(d/scale)-T.exp(-d/scale)) - np.log(2.).astype(G.floatX) 
    def logmass_stable(x):
        switch = (abs(x-mean) < d)
        return switch * logmass1(x) + (1-switch) * logmass2(x)

    logp = logmass_stable(sample).flatten(2).sum(axis=1)
    entr = None #(1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale)
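The sampling line above is the inverse-CDF trick for the Laplace distribution: with u uniform on (-0.5, 0.5), mean - scale * T.sgn(u) * T.log(1 - 2*abs(u)) is distributed as Laplace(mean, scale). Here is a minimal sketch of the same trick, with Theano's MRG_RandomStreams standing in for the project's G.rng_curand wrapper (an assumption; the original uses a CURAND-backed stream):

import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

rng = MRG_RandomStreams(seed=42)  # stand-in for G.rng_curand (assumption)
mean = T.matrix('mean')
scale = T.matrix('scale')
u = rng.uniform(size=mean.shape) - 0.5                    # u ~ Uniform(-0.5, 0.5)
sample = mean - scale * T.sgn(u) * T.log(1 - 2 * abs(u))  # ~ Laplace(mean, scale)
sample_fn = theano.function([mean, scale], sample)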
Project: LiviaNET    Author: josedolz    | project source | file source
def applyActivationFunction_ReLU_v4(inputData):

    return (T.sgn(inputData) + 1) * inputData * 0.5    

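As a quick check (not part of LiviaNET), the sgn-based expression above is algebraically the standard ReLU: (T.sgn(x) + 1)/2 is 0 for negative x and 1 for positive x, and the extra factor of x makes the x = 0 case come out to 0 either way.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
relu_sgn = theano.function([x], (T.sgn(x) + 1) * x * 0.5)
v = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
assert np.allclose(relu_sgn(v), np.maximum(v, 0.0))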
Project: cortex    Author: rdevon    | project source | file source
def step_sample(self, epsilon, p):
        dim = p.shape[p.ndim-1] // self.scale
        mu = _slice(p, 0, dim)
        log_b = _slice(p, 1, dim)
        return mu + T.exp(log_b) * T.sgn(epsilon) * T.log(1.0 - 2 * abs(epsilon))
Project: cortex    Author: rdevon    | project source | file source
def _laplace(trng, p, size=None):
    dim = p.shape[p.ndim-1] // 2
    mu = _slice(p, 0, dim)
    log_b = _slice(p, 1, dim)
    if size is None:
        size = mu.shape
    epsilon = trng.uniform(size=size, dtype=floatX) - 0.5
    return mu + T.exp(log_b) * T.sgn(epsilon) * T.log(1.0 - 2 * abs(epsilon))
Project: keras    Author: GeekLiB    | project source | file source
def sign(x):
    return T.sgn(x)
Project: SteinGAN    Author: DartML    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
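This pattern, shared by several projects below, folds the regularization gradients directly into g: p * l2 is the gradient of the penalty (l2/2) * sum(p**2), and T.sgn(p) * l1 is a subgradient of l1 * sum(|p|) (away from p = 0, where |p| is not differentiable). A small sketch with assumed l1/l2 values, confirming the correspondence via T.grad:

import numpy as np
import theano
import theano.tensor as T

l1, l2 = 1e-4, 1e-3   # assumed example values
p = T.dvector('p')
penalty = l1 * abs(p).sum() + 0.5 * l2 * (p ** 2).sum()
grad_fn = theano.function([p], T.grad(penalty, p))
v = np.array([-2.0, 0.5, 3.0])
assert np.allclose(grad_fn(v), l1 * np.sign(v) + l2 * v)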
Project: iGAN    Author: junyanz    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
Project: NMT    Author: tuzhaopeng    | project source | file source
def __abs__(self):
        assert hasattr(self, 'out'), 'all layers need a default output'
        new_obj = utils.copy(self)
        new_obj.out = abs(new_obj.out)
        if hasattr(new_obj, 'grads'):
            new_obj.grads = [TT.sgn(new_obj.out) * x for x in new_obj.grads]
        return new_obj
Project: NMT    Author: tuzhaopeng    | project source | file source
def __abs__(self):
        assert hasattr(self, 'out'), 'all layers need a default output'
        new_obj = utils.copy(self)
        new_obj.out = abs(new_obj.out)
        if hasattr(new_obj, 'grads'):
            new_obj.grads = [TT.sgn(new_obj.out) * x for x in new_obj.grads]
        return new_obj
Project: Deep-Learning-with-Theano    Author: PacktPublishing    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
Project: keraflow    Author: ipod825    | project source | file source
def sign(self, x):
        return T.sgn(x)
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def sign(x):
    return T.sgn(x)
Project: drmad    Author: bigaidream-projects    | project source | file source
def fd3(mlp, fdm, params, globalLR1, globalLR2, momentParam1, momentParam2):

    cost1 = mlp.classError1 + mlp.penalty
    gradT1reg = T.grad(cost1, mlp.paramsT2)        

    updateT1 = []; updateT2 = []; onlyT2param = []    
    # take opt from Adam?
    if params.opt2 in ['adam']: opt2 = adam()
    else: opt2 = None    

    # update W - (1) + (3)            
    for param, uC1, uC2 in zip(mlp.paramsT1, fdm.updateC1T1, fdm.updateC2T1):                               
        updateT1 += [(param, param + uC1 - uC2)]

    # compute grad T2 of C1,  update T2 - [(4) - (2) ] / lr1
    for param, grad, gT2 in zip(mlp.paramsT2, gradT1reg, fdm.gradC1T2):   
        # `step` is defined elsewhere in drmad (not shown in this snippet)
        if params.T2onlySGN:
            grad_proxi = T.sgn((grad - gT2) / step * globalLR1)
        else:
            grad_proxi = (grad - gT2) / step * globalLR1

        tempUp, tempPair, _ = update_fun(param, T.reshape(grad_proxi, param.shape), None,
                              'T2', {}, opt2, params,
                              globalLR1, globalLR2, momentParam1, momentParam2)
        updateT2 += tempUp
        onlyT2param += tempPair        


    debugs = [check for (_, check) in onlyT2param]  
    return updateT1 + updateT2, debugs
Project: keras-customized    Author: ambrite    | project source | file source
def sign(x):
    return T.sgn(x)
Project: WEARING    Author: nlkim0817    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
Project: 3D_Conditional_Gan    Author: yilei0620    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
Project: corelm    Author: nusnlp    | project source | file source
def get_function(self, func_name):
        if func_name == 'tanh':
            return T.tanh
        elif func_name == 'hardtanh':
            L.warning('Current hardTanh implementation is slow!')
            return lambda x: ((abs(x) <= 1) * x) + ((1 < abs(x)) * T.sgn(x))
        elif func_name == 'xtanh':
            return lambda x: T.tanh(x) + 0.1 * x
        elif func_name == 'sigmoid':
            return T.nnet.sigmoid
        elif func_name == 'fastsigmoid':
            L.error('T.nnet.ultra_fast_sigmoid function has some problems')
        elif func_name == 'hardsigmoid':
            return T.nnet.hard_sigmoid
        elif func_name == 'xsigmoid':
            return lambda x: T.nnet.sigmoid(x) + 0.1 * x
        elif func_name == 'softplus':
            return T.nnet.softplus
        elif func_name == 'relu':
            #return lambda x: T.maximum(x, 0)
            return lambda x: x * (x > 0)
            #return T.nnet.relu # Update theano and then use this one instead
        elif func_name == 'leakyrelu':
            return lambda x: T.maximum(x, 0.01 * x)
        elif func_name == 'cappedrelu':
            return lambda x: T.minimum(x * (x > 0), 6)
        elif func_name == 'softmax':
            return T.nnet.softmax
        elif func_name == 'norm1':
            return lambda x: x / T.nlinalg.norm(x, 1)
        elif func_name == 'norm2':
            #return lambda x: x / T.nlinalg.norm(x, 2)
            return lambda x: x / T.dot(x, x)**0.5
        else:
            L.error('Invalid function name given: ' + func_name)
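A quick check (not part of corelm) that the sgn-based hardtanh lambda above is equivalent to clipping into [-1, 1]:

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
hardtanh = theano.function(
    [x], ((abs(x) <= 1) * x) + ((1 < abs(x)) * T.sgn(x)))
v = np.array([-2.0, -1.0, -0.3, 0.0, 0.3, 1.0, 2.0])
assert np.allclose(hardtanh(v), np.clip(v, -1.0, 1.0))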
Project: bigan    Author: jeffdonahue    | project source | file source
def gradient_regularize(self, p, g):
        if self.ignored(p):
            return g
        if self.l2 != 0:
            g += p * self.l2
        if self.l1 != 0:
            g += T.sgn(p) * self.l1
        return g
Project: keras    Author: NVIDIA    | project source | file source
def sign(x):
    return T.sgn(x)
Project: seq2graph    Author: masterkeywikz    | project source | file source
def _get_updates_for(self, param, grad):
        grad_tm1 = shared_like(param, 'grad')
        step_tm1 = shared_like(param, 'step', self.learning_rate.eval())
        test = grad * grad_tm1
        diff = TT.lt(test, 0)
        steps = step_tm1 * (TT.eq(test, 0) +
                            TT.gt(test, 0) * self.step_increase +
                            diff * self.step_decrease)
        step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
        grad = grad - diff * grad
        yield param, param - TT.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step
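This is an Rprop-style rule: only the sign of the gradient enters the update, the per-parameter step grows by step_increase while consecutive gradients agree in sign, shrinks by step_decrease when the sign flips, and the update is skipped on a flip. A minimal NumPy sketch of a single step, with assumed values for the factors and step bounds:

import numpy as np

def rprop_step(param, grad, grad_tm1, step_tm1,
               step_increase=1.2, step_decrease=0.5,  # assumed values
               min_step=1e-6, max_step=1.0):
    test = grad * grad_tm1
    # grow the step where the gradient sign is stable, shrink where it flips
    step = step_tm1 * np.where(test > 0, step_increase,
                               np.where(test < 0, step_decrease, 1.0))
    step = np.clip(step, min_step, max_step)
    grad = np.where(test < 0, 0.0, grad)  # skip the update after a sign flip
    return param - np.sign(grad) * step, grad, step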
Project: textGAN_public    Author: dreasysnail    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
Project: textGAN_public    Author: dreasysnail    | project source | file source
def __init__(self, model, e, a=0.5, verbose=2, iterator='linear'):

        self.verbose = verbose
        self.model = init(model)
        try:
            self.iterator = instantiate(iterators, iterator)
        except:
            self.iterator = instantiate(async_iterators, iterator)

        y_tr = self.model[-1].op({'dropout':True, 'bn_active':True, 'infer':False})
        y_te = self.model[-1].op({'dropout':False, 'bn_active':False, 'infer':False})
        y_inf = self.model[-1].op({'dropout':False, 'bn_active':True, 'infer':True})
        self.X = self.model[0].X
        self.Y = T.TensorType(theano.config.floatX, (False,)*(len(model[-1].out_shape)))()

        cost = T.nnet.categorical_crossentropy(y_tr, self.Y).mean()

        X_adv = self.X + e*T.sgn(T.grad(cost, self.X))

        self.model[0].X = X_adv
        y_tr_adv = self.model[-1].op({'dropout':True, 'bn_active':True, 'infer':False})

        cost_adv = a*cost + (1.-a)*T.nnet.categorical_crossentropy(y_tr_adv, self.Y).mean()

        te_cost = T.nnet.categorical_crossentropy(y_te, self.Y).mean()

        X_te_adv = self.X + e*T.sgn(T.grad(te_cost, self.X))

        self.updates = collect_updates(self.model, cost_adv)
        self.infer_updates = collect_infer_updates(self.model)
        self.reset_updates = collect_reset_updates(self.model)
        self._train = theano.function([self.X, self.Y], cost_adv, updates=self.updates)
        self._predict = theano.function([self.X], y_te)
        self._fast_sign = theano.function([self.X, self.Y], X_te_adv)
        self._infer = theano.function([self.X], y_inf, updates=self.infer_updates)
        self._reset = theano.function([], updates=self.reset_updates)
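The X + e * T.sgn(T.grad(cost, X)) lines above are the fast gradient sign method (FGSM) for building adversarial examples. A standalone sketch on a toy softmax classifier (the classifier is an assumption; the original wires the perturbation into its own model stack):

import numpy as np
import theano
import theano.tensor as T

rng = np.random.RandomState(0)
X = T.matrix('X')
Y = T.matrix('Y')   # one-hot targets
W = theano.shared(rng.randn(4, 3).astype(theano.config.floatX), name='W')
probs = T.nnet.softmax(T.dot(X, W))        # toy stand-in for the model
cost = T.nnet.categorical_crossentropy(probs, Y).mean()
e = 0.1                                    # assumed perturbation size
X_adv = X + e * T.sgn(T.grad(cost, X))     # fast gradient sign perturbation
fast_sign = theano.function([X, Y], X_adv)

Xv = rng.rand(2, 4).astype(theano.config.floatX)
Yv = np.eye(3, dtype=theano.config.floatX)[[0, 2]]   # one-hot rows
print(fast_sign(Xv, Yv).shape)   # (2, 4)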
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def sign(x):
    return T.sgn(x)
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def sgn(x):
    """
    Elementwise sign of `x`.

    """
    # see decorator for function body
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def test_abs_mul_div(self):
        """
        Test that 4 * x / abs(2 * x) gets simplified during
        canonicalization.
        """

        x = T.dscalar()
        a = T.abs_(x)

        if theano.config.mode == 'FAST_COMPILE':
            mode = theano.compile.mode.get_mode('FAST_RUN').excluding(
                    "local_elemwise_fusion")
        else:
            mode = theano.compile.mode.get_default_mode().excluding(
                    "local_elemwise_fusion")

        f = theano.function([x], [(4 * x) / abs(2 * x)], mode=mode)
        print(f.maker.fgraph.toposort())
        print()
        f(.1)
        f(-1)
        # some stabilization optimizations make the output finite instead of NaN;
        # DebugMode would raise an error when it sees NaN
        if not isinstance(mode, theano.compile.debugmode.DebugMode):
            assert numpy.isfinite(f(0))

        assert len(f.maker.fgraph.toposort()) == 2
        assert f.maker.fgraph.toposort()[0].op == T.sgn

        f = theano.function([x], [(4 * x) / abs(x / 2)], mode=mode)
        print(f.maker.fgraph.toposort())
        print()
        f(.1)
        f(-1)
        # some stabilization optimizations make the output finite instead of NaN;
        # DebugMode would raise an error when it sees NaN
        if not isinstance(mode, theano.compile.debugmode.DebugMode):
            assert numpy.isfinite(f(0))

        assert len(f.maker.fgraph.toposort()) == 2
        assert f.maker.fgraph.toposort()[0].op == T.sgn
Project: iaf    Author: openai    | project source | file source
def laplace_diag(mean, logscale, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
    logp = (- logscale - abs(sample-mean) / scale).flatten(2).sum(axis=1)
    entr = (1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale)
Project: InnerOuterRNN    Author: Chemoinformatics    | project source | file source
def sign(x):
    return T.sgn(x)
Project: vgan    Author: Shuangfei    | project source | file source
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g
Project: NMT-Coverage    Author: tuzhaopeng    | project source | file source
def __abs__(self):
        assert hasattr(self, 'out'), 'all layers need a default output'
        new_obj = utils.copy(self)
        new_obj.out = abs(new_obj.out)
        if hasattr(new_obj, 'grads'):
            new_obj.grads = [TT.sgn(new_obj.out) * x for x in new_obj.grads]
        return new_obj
Project: NMT-Coverage    Author: tuzhaopeng    | project source | file source
def __abs__(self):
        assert hasattr(self, 'out'), 'all layers need a default output'
        new_obj = utils.copy(self)
        new_obj.out = abs(new_obj.out)
        if hasattr(new_obj, 'grads'):
            new_obj.grads = [TT.sgn(new_obj.out) * x for x in new_obj.grads]
        return new_obj
Project: bmlingam    Author: taku-y    | project source | file source
def _indvdl_gg(
    hparams, std_x, n_samples, L_cov, Normal, Gamma, Deterministic, sgn, gamma, 
    floatX, cholesky, tt, verbose):
    # Uniform distribution on sphere
    gs = Normal('gs', np.float32(0.0), np.float32(1.0), 
                shape=(n_samples, 2), dtype=floatX)
    ss = Deterministic('ss', gs + sgn(sgn(gs) + np.float32(1e-10)) * 
                             np.float32(1e-10))
    ns = Deterministic('ns', ss.norm(L=2, axis=1)[:, np.newaxis])
    us = Deterministic('us', ss / ns)

    # Scaling s.t. variance to 1
    n = 2 # dimension
    beta = np.float32(hparams['beta_coeff'])
    m = n * gamma(0.5 * n / beta) \
        / (2 ** (1 / beta) * gamma((n + 2) / (2 * beta)))
    L_cov_ = (np.sqrt(m) * cholesky(L_cov)).astype(floatX)

    # Scaling to v_indvdls
    scale1 = np.float32(std_x[0] * hparams['v_indvdl_1'])
    scale2 = np.float32(std_x[1] * hparams['v_indvdl_2'])
    # set_subtensor returns a new variable; keep the result of each update
    L_cov_ = tt.set_subtensor(L_cov_[0, :], L_cov_[0, :] * scale1, inplace=True)
    L_cov_ = tt.set_subtensor(L_cov_[1, :], L_cov_[1, :] * scale2, inplace=True)

    # Draw samples
    ts = Gamma(
        'ts', alpha=np.float32(n / (2 * beta)), beta=np.float32(.5), 
        shape=n_samples, dtype=floatX
    )[:, np.newaxis]
    mus_ = Deterministic(
        'mus_', ts**(np.float32(0.5 / beta)) * us.dot(L_cov_)
    )
    mu1s_ = mus_[:, 0]
    mu2s_ = mus_[:, 1]

    if 10 <= verbose:
        print('GG for individual effect')
        print('gs.dtype = {}'.format(gs.dtype))
        print('ss.dtype = {}'.format(ss.dtype))
        print('ns.dtype = {}'.format(ns.dtype))
        print('us.dtype = {}'.format(us.dtype))
        print('ts.dtype = {}'.format(ts.dtype))

    return mu1s_, mu2s_
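One detail worth noting in this snippet: ss = gs + sgn(sgn(gs) + 1e-10) * 1e-10 nudges every entry of gs away from exactly zero (sgn(sgn(g) + 1e-10) is +1 for g >= 0 and -1 for g < 0), so the row norms ns used for the projection onto the sphere cannot vanish. A small NumPy illustration (not from bmlingam):

import numpy as np

g = np.array([-0.5, 0.0, 0.5])
nudged = g + np.sign(np.sign(g) + 1e-10) * 1e-10
print(nudged)                    # middle entry is now 1e-10 instead of 0.0
assert np.all(nudged != 0.0)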