Python numpy module: power() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.power().
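
Before the project samples, a minimal standalone sketch of numpy.power itself: it raises the elements of the first argument to the powers given by the second, element-wise and with broadcasting. The arrays below are illustrative only.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
print(np.power(a, 2))          # [1. 4. 9.]  -- element-wise square
print(np.power(2.0, a))        # [2. 4. 8.]  -- scalar base broadcast over the array
print(np.power(a, [3, 2, 1]))  # [1. 4. 3.]  -- per-element exponents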

Project: AutoML5    Author: djajetic    | project source | file source
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])   # max, ignoring NaN and +Inf
    mini = np.nanmin(sol[sol != float('-inf')])  # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
Project: NumpyDL    Author: oujago    | project source | file source
def derivative(self, input=None):
        """The derivative of :meth:`tanh` functions is

        .. math:: \\frac{d}{dx} tanh(x) & = \\frac{d}{dx} \\frac{sinh(x)}{cosh(x)} \\\\
                  & = \\frac{cosh(x) \\frac{d}{dx}sinh(x) - sinh(x) \\frac{d}{dx}cosh(x) }{ cosh^2(x)} \\\\
                  & = \\frac{ cosh(x) cosh(x) - sinh(x) sinh(x) }{ cosh^2(x)}  \\\\
                  & = 1 - tanh^2(x) 

        Returns
        -------
        float32 
            The derivative of the tanh function.
        """
        last_forward = self.forward(input) if input is not None else self.last_forward
        return 1 - np.power(last_forward, 2)


# tanh-end
# relu-start
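
A quick numerical check (standalone, not part of NumpyDL) that 1 - tanh^2(x) matches the derivative of tanh, using a central finite difference:

import numpy as np

x = np.linspace(-2, 2, 5)
analytic = 1 - np.power(np.tanh(x), 2)
h = 1e-6
numeric = (np.tanh(x + h) - np.tanh(x - h)) / (2 * h)
print(np.allclose(analytic, numeric, atol=1e-6))  # True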
Project: NumpyDL    Author: oujago    | project source | file source
def update(self, params, grads):
        # init cache and delta
        if self.cache is None:
            self.cache = [_zero(p.shape) for p in params]
        if self.delta is None:
            self.delta = [_zero(p.shape) for p in params]

        # update parameters
        for i, (c, d, p, g) in enumerate(zip(self.cache, self.delta, params, grads)):
            c = self.rho * c + (1 - self.rho) * np.power(g, 2)
            update = g * np.sqrt(d + self.epsilon) / np.sqrt(c + self.epsilon)
            p -= self.lr * update
            d = self.rho * d + (1 - self.rho) * np.power(update, 2)

            self.cache[i] = c
            self.delta[i] = d
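
A self-contained sketch of the same Adadelta-style step on a single scalar parameter; rho, epsilon, and lr here are assumed values, not taken from the project:

import numpy as np

rho, epsilon, lr = 0.95, 1e-6, 1.0
p, c, d = 1.0, 0.0, 0.0  # parameter, squared-gradient cache, squared-update cache
g = 0.5                  # a gradient
c = rho * c + (1 - rho) * np.power(g, 2)
update = g * np.sqrt(d + epsilon) / np.sqrt(c + epsilon)
p -= lr * update
d = rho * d + (1 - rho) * np.power(update, 2)
print(p, c, d)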
Project: NumpyDL    Author: oujago    | project source | file source
def update(self, params, grads):
        # init
        self.iterations += 1
        a_t = self.lr * np.sqrt(1 - np.power(self.beta2, self.iterations)) / \
              (1 - np.power(self.beta1, self.iterations))
        if self.ms is None:
            self.ms = [_zero(p.shape) for p in params]
        if self.vs is None:
            self.vs = [_zero(p.shape) for p in params]

        # update parameters
        for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)):
            m = self.beta1 * m + (1 - self.beta1) * g
            v = self.beta2 * v + (1 - self.beta2) * np.power(g, 2)
            p -= a_t * m / (np.sqrt(v) + self.epsilon)

            self.ms[i] = m
            self.vs[i] = v
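
The two np.power calls above implement Adam's bias correction; a standalone sketch of how the step-size factor a_t behaves over iterations (hyperparameters are the usual defaults, assumed here):

import numpy as np

lr, beta1, beta2 = 0.001, 0.9, 0.999
for t in (1, 10, 1000):
    a_t = lr * np.sqrt(1 - np.power(beta2, t)) / (1 - np.power(beta1, t))
    print(t, a_t)  # a_t tends to lr as the bias-correction terms decay toward 1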
Project: NumpyDL    Author: oujago    | project source | file source
def update(self, params, grads):
        # init
        self.iterations += 1
        a_t = self.lr / (1 - np.power(self.beta1, self.iterations))
        if self.ms is None:
            self.ms = [_zero(p.shape) for p in params]
        if self.vs is None:
            self.vs = [_zero(p.shape) for p in params]

        # update parameters
        for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)):
            m = self.beta1 * m + (1 - self.beta1) * g
            v = np.maximum(self.beta2 * v, np.abs(g))
            p -= a_t * m / (v + self.epsilon)

            self.ms[i] = m
            self.vs[i] = v
Project: NumpyDL    Author: oujago    | project source | file source
def forward(self, outputs, targets):
        """MeanSquaredError forward propagation. 

        .. math:: L = \\frac{1}{2N} \\sum_{n=1}^{N} \\sum_{k} (p_{nk} - t_{nk})^2

        Parameters
        ----------
        outputs, targets : numpy.array 
            The arrays to compute the squared difference between.

        Returns
        -------
        numpy.array 
            An expression for the element-wise squared difference.
        """
        return 0.5 * np.mean(np.sum(np.power(outputs - targets, 2), axis=1))
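
A worked example of this forward pass on a tiny batch (values chosen arbitrarily):

import numpy as np

outputs = np.array([[1.0, 2.0], [3.0, 4.0]])
targets = np.array([[1.0, 1.0], [2.0, 2.0]])
# per-row sums of squared error are [1.0, 5.0]; their mean is 3.0; half of that is 1.5
print(0.5 * np.mean(np.sum(np.power(outputs - targets, 2), axis=1)))  # 1.5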
Project: NumpyDL    Author: oujago    | project source | file source
def forward(self, outputs, targets):
        """HellingerDistance forward propagation. 

        Parameters
        ----------
        outputs : numpy 2D array
            outputs in (0, 1), such as softmax output of a neural network,
            with data points in rows and class probabilities in columns.
        targets : numpy 2D array
            One-hot encoding of the correct class per data point, in the same
            layout as predictions (non-binary targets in [0, 1] do not work!)

        Returns
        -------
        numpy 1D array
            An expression for the Hellinger Distance
        """
        root_difference = np.sqrt(outputs) - np.sqrt(targets)
        return np.mean(np.sum(np.power(root_difference, 2), axis=1) / np.sqrt(2))
Project: Stein-Variational-Gradient-Descent    Author: DartML    | project source | file source
def evaluation(self, X_test, y_test):
        # normalization
        X_test = self.normalization(X_test)

        # average over the output
        pred_y_test = np.zeros([self.M, len(y_test)])
        prob = np.zeros([self.M, len(y_test)])

        '''
            Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood
        '''
        for i in range(self.M):
            w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
            pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
            prob[i, :] = np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma) )
        pred = np.mean(pred_y_test, axis=0)

        # evaluation
        svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
        svgd_ll = np.mean(np.log(np.mean(prob, axis = 0)))

        return (svgd_rmse, svgd_ll)
Project: mx-lsoftmax    Author: luoyetx    | project source | file source
def plot_beta():
    '''plot beta over training
    '''
    beta = args.beta
    scale = args.scale
    beta_min = args.beta_min
    num_epoch = args.num_epoch
    epoch_size = int(float(args.num_examples) / args.batch_size)

    x = np.arange(num_epoch*epoch_size)
    y = beta * np.power(scale, x)
    y = np.maximum(y, beta_min)
    epoch_x = np.arange(num_epoch) * epoch_size
    epoch_y = beta * np.power(scale, epoch_x)
    epoch_y = np.maximum(epoch_y, beta_min)

    # plot beta descent curve
    plt.semilogy(x, y)
    plt.semilogy(epoch_x, epoch_y, 'ro')
    plt.title('beta descent')
    plt.ylabel('beta')
    plt.xlabel('epoch')
    plt.show()
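
A standalone sketch of the floored exponential decay being plotted; beta, scale, and beta_min are assumed values rather than the script's parsed args:

import numpy as np

beta, scale, beta_min = 100.0, 0.99, 1.0
x = np.arange(1000)
y = np.maximum(beta * np.power(scale, x), beta_min)
print(y[0], y[-1])  # starts at 100.0; clipped at 1.0 once the decay falls below the floor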
Project: piperine    Author: DNA-and-Natural-Algorithms-Group    | project source | file source
def BM_Eval(seq_dict, BMlist, toeholds):
    w_exp = np.concatenate([np.zeros((5,)), np.power(2, np.arange(6))])
    BM_score = 0
    Largest_match = 0

    numstrings = len(BMlist)

    prog = MyProgress((numstrings**2 - numstrings) // 2)
    for ctr in range(numstrings):
        strand1 = BMlist[ctr]
        for strand2 in BMlist[ctr+1:]:
            [ismaxmatch, maxmatch, mm_i, mm_j] = \
                compare_sequence_notoe(seq_dict[strand1],
                                       seq_dict[strand2],
                                       toeholds)
            if maxmatch > Largest_match:
                Largest_match = maxmatch

            BM_score = BM_score + w_exp[int(min(maxmatch, 10))]
            prog.inc()

    return [BM_score, Largest_match]
Project: coursera_ML_in_python    Author: whyjay17    | project source | file source
def computeCost(X, y, theta):
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))

#def gradientDescent(X, y, theta, alpha, iters):
#    temp = np.matrix(np.zeros(theta.shape))
#    params = int(theta.ravel().shape[1]) #flattens
#    cost = np.zeros(iters)
#
#    for i in range(iters):
#        err = (X * theta.T) - y
#        
#        for j in range(params):
#            term = np.multiply(err, X[:,j])
#            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
#        
#        theta = temp
#        cost[i] = computeCost(X, y, theta)
#    
#    return theta, cost
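
A worked call of computeCost on a tiny design matrix. It uses np.matrix because the snippet relies on matrix semantics for `*` (note that np.matrix is deprecated in modern NumPy):

import numpy as np

X = np.matrix([[1.0, 1.0], [1.0, 2.0]])  # bias column plus one feature
y = np.matrix([[1.0], [2.0]])
theta = np.matrix([[0.0, 1.0]])          # predicts y = x exactly
inner = np.power(((X * theta.T) - y), 2)
print(np.sum(inner) / (2 * len(X)))      # 0.0 -- a perfect fit has zero cost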
Project: gbrs    Author: churchill-lab    | project source | file source
def get_genotype_probability(aln_profile, aln_specificity, sigma=0.12):
    # 'aln_specificity' should be a set of unit vectors (at least one of the entries is larger than 1.)
    num_haps = len(aln_profile)
    aln_vec = unit_vector(aln_profile)
    genoprob = []
    for i in range(num_haps):
        v1 = unit_vector(aln_specificity[i])
        for j in range(i, num_haps):
            if j == i:
                genoprob.append(sum(np.power(aln_vec - v1, 2))) # homozygotes
            else:
                v2 = unit_vector(aln_specificity[j])
                geno_vec = unit_vector(v1 + v2)
                # compute directional similarity
                genoprob.append(sum(np.power(aln_vec - geno_vec, 2))) # for heterozygotes
    genoprob = np.exp(np.array(genoprob) / (-2 * sigma * sigma))
    return np.array(genoprob / sum(genoprob))
Project: structured-output-ae    Author: sbelharbi    | project source | file source
def set_params(mo, bparams):
    i = 0
    for la in mo.layers:
        we = bparams[i:i+2]
        print(len(we))
        la.set_weights(we)
        i += 2
    return mo

#with open("best_model_keras.pkl", 'r') as f:
#    b_params = pkl.load(f)
#
#model = set_params(model, b_params)
#out = model.predict(xvl, batch_size=xvl.shape[0], verbose=0)
#error = np.mean(np.mean(np.power(out - yvl, 2), axis=1))
#print "Error vl", error
#sys.exit()

#init_p = get_params(model)
#with open("init_keras_param.pkl", 'w') as f:
#    pkl.dump(init_p, f)
Project: CRN_ProbabilisticInversion    Author: elaloy    | project source | file source
def CompLikelihood(X,fx,MCPar,Measurement,Extra):

    Sigma=Measurement.Sigma*np.ones((X.shape[0]))
    of=np.zeros((fx.shape[0],1))
    p=np.zeros((fx.shape[0],1))
    log_p=np.zeros((fx.shape[0],1))
    for ii in range(0, fx.shape[0]):
        e=Measurement.MeasData-fx[ii,:]

        of[ii,0]=np.sqrt(np.sum(np.power(e,2.0))/e.shape[1])
        if MCPar.lik==2: # Compute standard uncorrelated and homoscedastic Gaussian log-likelihood
            log_p[ii,0]= - ( Measurement.N / 2.0) * np.log(2.0 * np.pi) - Measurement.N * np.log( Sigma[ii] ) - 0.5 * np.power(Sigma[ii],-2.0) * np.sum( np.power(e,2.0) )
            p[ii,0]=(1.0/np.sqrt(2*np.pi* Sigma[ii]**2))**Measurement.N * np.exp(- 0.5 * np.power(Sigma[ii],-2.0) * np.sum( np.power(e,2.0) ))

        if MCPar.lik==3: # Box and Tiao (1973) log-likelihood formulation with Sigma integrated out based on prior of the form p(sigma) ~ 1/sigma
            log_p[ii,0]= - ( Measurement.N / 2.0) * np.log(np.sum(np.power(e,2.0))) 
            p[ii,0]=np.exp(log_p[ii,0])
    return of, p, log_p
Project: pyshearlab    Author: stefanloock    | project source | file source
def SLcomputeSNR(X, Xnoisy):
    """
    SLcomputeSNR Compute signal to noise ratio (SNR).

    Usage:

        SNR = SLcomputeSNR(X, Xnoisy)

    Input:

        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:

        SNR: The signal to noise ratio (in dB).
    """

    if np.linalg.norm(X-Xnoisy) == 0:
        return np.inf
    else:
        return 10 * np.log10( np.sum(np.power(X,2)) / np.sum(np.power(X-Xnoisy,2)) )
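
A usage sketch with small synthetic arrays (seeded noise, so the result is reproducible):

import numpy as np

X = np.ones((4, 4))
Xnoisy = X + 0.1 * np.random.RandomState(0).randn(4, 4)
snr = 10 * np.log10(np.sum(np.power(X, 2)) / np.sum(np.power(X - Xnoisy, 2)))
print(snr)  # roughly 20 dB for noise with standard deviation ~0.1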
Project: histwords    Author: williamleif    | project source | file source
def __init__(self, path, normalize=True, eig=0.0, transpose=False):
        if transpose:
            ut = np.load(path + '.vt.npy')
            self.wi, self.iw = load_vocabulary(path + '.contexts.vocab')
        else:
            ut = np.load(path + '.ut.npy')
            self.wi, self.iw = load_vocabulary(path + '.words.vocab')
        s = np.load(path + '.s.npy')

        if eig == 0.0:
            self.m = ut.T
        elif eig == 1.0:
            self.m = s * ut.T
        else:
            self.m = np.power(s, eig) * ut.T

        self.dim = self.m.shape[1]

        if normalize:
            self.normalize()
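
A minimal sketch of the eigenvalue-weighting idea above, with stand-in SVD factors rather than files loaded from disk:

import numpy as np

ut = np.array([[0.6, 0.8], [0.8, -0.6]])  # rows play the role of singular vectors
s = np.array([4.0, 1.0])                  # singular values
for eig in (0.0, 0.5, 1.0):
    m = ut.T if eig == 0.0 else np.power(s, eig) * ut.T
    print(eig, m[0])  # larger eig scales high-singular-value dimensions more strongly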
Project: histwords    Author: williamleif    | project source | file source
def run(count_path, out_path, smooth=0, cds=True, normalize=False, neg=1):
    counts = create_representation("Explicit", count_path, normalize=False)
    old_mat = counts.m
    index = counts.wi
    smooth = old_mat.sum() * smooth

    # getting marginal probs
    row_probs = old_mat.sum(1) + smooth
    col_probs = old_mat.sum(0) + smooth
    if cds:
        col_probs = np.power(col_probs, 0.75)
    row_probs = row_probs / row_probs.sum()
    col_probs = col_probs / col_probs.sum()

    # building PPMI matrix
    ppmi_mat = make_ppmi_mat(old_mat, row_probs, col_probs, smooth, neg=neg, normalize=normalize)
    import pyximport
    pyximport.install(setup_args={"include_dirs": np.get_include()})
    from representations import sparse_io
    sparse_io.export_mat_eff(ppmi_mat.row, ppmi_mat.col, ppmi_mat.data, out_path + ".bin")
    util.write_pickle(index, out_path + "-index.pkl")
Project: histwords    Author: williamleif    | project source | file source
def __init__(self, path, normalize=True, eig=0.0, **kwargs):
        ut = np.load(path + '-u.npy', mmap_mode="c")
        s = np.load(path + '-s.npy', mmap_mode="c")
        vocabfile = path + '-vocab.pkl'
        self.iw = load_pickle(vocabfile)
        self.wi = {w:i for i, w in enumerate(self.iw)}

        if eig == 0.0:
            self.m = ut
        elif eig == 1.0:
            self.m = s * ut
        else:
            self.m = np.power(s, eig) * ut

        self.dim = self.m.shape[1]

        if normalize:
            self.normalize()
Project: DriverPower    Author: smshuai    | project source | file source
def dispersion_test(yhat, y, k=100):
    """ Implement the regression based dispersion test with k re-sampling.

    Args:
        yhat (np.array): predicted mutation count
        y (np.array): observed mutation count
        k (int): number of re-sampling rounds

    Returns:
        float, float: p-value, theta

    """
    theta = 0
    pval = 0
    for i in range(k):
        y_sub, yhat_sub = resample(y, yhat, random_state=i)
        # (np.power((y - yhat), 2) - y) / yhat for Poisson regression
        aux = (np.power((y_sub - yhat_sub), 2) - yhat_sub) / yhat_sub
        mod = sm.OLS(aux, yhat_sub)
        res = mod.fit()
        theta += res.params[0]
        pval += res.pvalues[0]
    theta = theta/k
    pval = pval/k
    return pval, theta
Project: skutil    Author: tgsmith61591    | project source | file source
def _transform_y(y, lam):
    """Transform a single y, given a single lambda value.
    No validation performed.

    Parameters
    ----------

    y : array_like, shape (n_samples,)
       The vector being transformed

    lam : float
       The lambda value used for the transformation
    """
    # ensure np array
    y = np.array(y)
    y_prime = np.array([(np.power(x, lam) - 1) / lam if not _eqls(lam, ZERO) else log(x) for x in y])

    # rarely -- very rarely -- we can get a NaN. Why?
    return y_prime
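
A hedged illustration of the Box-Cox mapping this helper applies; ZERO and _eqls are project-internal, so plain values stand in for them here:

import numpy as np

y = np.array([1.0, 2.0, 4.0])
lam = 0.5
print((np.power(y, lam) - 1) / lam)  # Box-Cox with lambda = 0.5: [0. 0.828 2.]
print(np.log(y))                     # the limiting case as lambda -> 0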
Project: skutil    Author: tgsmith61591    | project source | file source
def _yj_trans_single_x(x, lam):
    if x >= 0:
        # Case 1: x >= 0 and lambda is not 0
        if not _eqls(lam, ZERO):
            return (np.power(x + 1, lam) - 1.0) / lam

        # Case 2: x >= 0 and lambda is zero
        return log(x + 1)
    else:
        # Case 3: x < 0 and lambda is not two
        if not lam == 2.0:
            denom = 2.0 - lam
            numer = np.power((-x + 1), (2.0 - lam)) - 1.0
            return -numer / denom

        # Case 4: x < 0 and lambda is two
        return -log(-x + 1)
Project: melanoma-transfer    Author: learningtitans    | project source | file source
def std(files, batch_size=128):
    s = np.zeros(3)
    s2 = np.zeros(3)
    shape = None
    for i in range(0, len(files), batch_size):
        print("done with {:>3} / {} images".format(i, len(files)))
        images = np.array(data.load_image(files[i : i + batch_size]),
                          dtype=np.float64)
        shape = images.shape
        s += images.sum(axis=(0, 2, 3))
        s2 += np.power(images, 2).sum(axis=(0, 2, 3))
    n = len(files) * shape[2] * shape[3]
    var = (s2 - s**2.0 / n) / (n - 1)

    print('mean')
    print((s / n).astype(np.float32))
    print('std')
    print(np.sqrt(var))
    #return np.sqrt(var)
Project: radar    Author: amoose136    | project source | file source
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: radar    Author: amoose136    | project source | file source
def test_half_coercion(self):
        """Test that half gets coerced properly with the other types"""
        a16 = np.array((1,), dtype=float16)
        a32 = np.array((1,), dtype=float32)
        b16 = float16(1)
        b32 = float32(1)

        assert_equal(np.power(a16, 2).dtype, float16)
        assert_equal(np.power(a16, 2.0).dtype, float16)
        assert_equal(np.power(a16, b16).dtype, float16)
        assert_equal(np.power(a16, b32).dtype, float16)
        assert_equal(np.power(a16, a16).dtype, float16)
        assert_equal(np.power(a16, a32).dtype, float32)

        assert_equal(np.power(b16, 2).dtype, float64)
        assert_equal(np.power(b16, 2.0).dtype, float64)
        assert_equal(np.power(b16, b16).dtype, float16)
        assert_equal(np.power(b16, b32).dtype, float32)
        assert_equal(np.power(b16, a16).dtype, float16)
        assert_equal(np.power(b16, a32).dtype, float32)

        assert_equal(np.power(a32, a16).dtype, float32)
        assert_equal(np.power(a32, b16).dtype, float32)
        assert_equal(np.power(b32, a16).dtype, float16)
        assert_equal(np.power(b32, b16).dtype, float32)
Project: radar    Author: amoose136    | project source | file source
def __ipow__(self, other):
        """
        Raise self to the power other, in place.

        """
        other_data = getdata(other)
        other_mask = getmask(other)
        with np.errstate(divide='ignore', invalid='ignore'):
            self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                         other_data))
        invalid = np.logical_not(np.isfinite(self._data))
        if invalid.any():
            if self._mask is not nomask:
                self._mask |= invalid
            else:
                self._mask = invalid
            np.copyto(self._data, self.fill_value, where=invalid)
        new_mask = mask_or(other_mask, invalid)
        self._mask = mask_or(self._mask, new_mask)
        return self
Project: AutoML4    Author: djajetic    | project source | file source
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])   # max, ignoring NaN and +Inf
    mini = np.nanmin(sol[sol != float('-inf')])  # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
Project: automl_gpu    Author: abhishekkrthakur    | project source | file source
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    maxi = np.nanmax(sol[sol != float('inf')])   # max, ignoring NaN and +Inf
    mini = np.nanmin(sol[sol != float('-inf')])  # min, ignoring NaN and -Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
Project: speech_feature_extractor    Author: ZhihaoDU    | project source | file source
def calc_stoi_from_spec(clean_spec, degraded_spec, analysis_len=30):
    freq_bins = np.size(clean_spec, 0)
    frames = np.size(clean_spec, 1)
    x = np.zeros((freq_bins, frames - analysis_len + 1, analysis_len), dtype=np.float32)
    y = np.zeros((freq_bins, frames - analysis_len + 1, analysis_len), dtype=np.float32)
    for j in range(0, freq_bins):
        for m in range(analysis_len - 1, frames, 1):
            seg = m - analysis_len + 1  # segment index, keeps writes inside the allocated arrays
            x[j, seg] = clean_spec[j, m - analysis_len + 1:m + 1]
            y[j, seg] = degraded_spec[j, m - analysis_len + 1:m + 1]
            y[j, seg] = np.minimum(np.linalg.norm(x[j, seg, :]) / np.linalg.norm(y[j, seg, :]) * y[j, seg, :],
                                   (1. + np.power(10., 15. / 20.)) * x[j, seg, :])  # y is normalized and clipped
    x_mean = np.mean(x, axis=(0, 1))
    y_mean = np.mean(y, axis=(0, 1))
    score = 0.
    for j in range(0, freq_bins):
        for m in range(analysis_len - 1, frames, 1):
            seg = m - analysis_len + 1
            score += np.dot(x[j, seg, :] - x_mean, y[j, seg, :] - y_mean) / \
                     (np.linalg.norm(x[j, seg, :] - x_mean) * np.linalg.norm(y[j, seg, :] - y_mean))
    score /= (freq_bins * analysis_len)
    return score
Project: speech_feature_extractor    Author: ZhihaoDU    | project source | file source
def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)

    if win_type == 'hanning':
        window = np.hanning(channel_number)
    elif win_type == 'hamming':
        window = np.hamming(channel_number)
    elif win_type == 'triangle':
        window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
    else:
        window = np.ones(channel_number)
    window = window.reshape((channel_number, 1))

    xe = np.power(xf, 2.0)
    frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
    cochleagram = np.zeros((channel_number, frames))
    for i in range(frames):
        one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
        cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))

    cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
    return cochleagram
Project: speech_feature_extractor    Author: ZhihaoDU    | project source | file source
def log_power_spectrum_extractor(x, win_len, shift_len, win_type, is_log=False):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spect = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'rectangle':
        window = np.ones(win_len)
    else:
        window = np.ones(win_len)  # fall back to a rectangular window for unknown win_type

    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        if is_log:
            spect[:, i] = np.log(np.power(np.abs(stft[0: win_len//2+1, i]), 2.))
        else:
            spect[:, i] = np.power(np.abs(stft[0: win_len//2+1, i]), 2.)

    return spect
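
A standalone check of the power-spectrum step: |FFT|^2 of a pure tone concentrates its energy in a single bin:

import numpy as np

n = 64
tone = np.sin(2 * np.pi * 8 * np.arange(n) / n)  # exactly 8 cycles in 64 samples
spect = np.power(np.abs(np.fft.fft(tone, n)[: n // 2 + 1]), 2.)
print(np.argmax(spect))  # 8 -- all energy lands in bin 8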
Project: speech_feature_extractor    Author: ZhihaoDU    | project source | file source
def unknown_feature_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
Project: speech_feature_extractor    Author: ZhihaoDU    | project source | file source
def ams_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
Project: Auspex    Author: BBN-Q    | project source | file source
def perp_fit(ts, vs):

    def lsq_macrospin(p, ts, vs):
        t0 = p[0]
        v0 = p[1]
        a = v0
        b = t0*v0
        to = 1
        vo = a + b/to

        # Here is what we expect
        vs_ideal = v0*(1.0 + t0/ts)
        Xs = []
        Ys = []
        for t,v in zip(ts,vs):
            ti,vi = find_closest(t,v,t0,v0)
            Xs.append(x2X(ti,to,b))
            Ys.append(y2Y(v,vi,a,b))
        return np.power(Ys,2)
    p0 = [0.2, 100]
    p, flag = leastsq(lsq_macrospin, p0, args=(ts, vs))
    return p
Project: Auspex    Author: BBN-Q    | project source | file source
def find_null_offset(xpts, powers, default=0.0):
    """Finds the offset corresponding to the minimum power using a fit to the measured data"""
    def model(x, a, b, c):
        return a*(x - b)**2 + c
    powers = np.power(10, powers/10.)
    min_idx = np.argmin(powers)
    try:
        fit = curve_fit(model, xpts, powers, p0=[1, xpts[min_idx], powers[min_idx]])
    except RuntimeError:
        logger.warning("Mixer null offset fit failed.")
        return default, xpts, np.zeros(len(powers))
    best_offset = np.real(fit[0][1])
    best_offset = np.minimum(best_offset, xpts[-1])
    best_offset = np.maximum(best_offset, xpts[0])
    xpts_fine = np.linspace(xpts[0],xpts[-1],101)
    fit_pts = np.array([np.real(model(x, *fit[0])) for x in xpts_fine])
    if min(fit_pts) < 0:
        fit_pts -= min(fit_pts) - 1e-10  # prevent taking the log of a non-positive value
    return best_offset, xpts_fine, 10*np.log10(fit_pts)
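
The np.power(10, powers/10.) line converts dB readings to linear power before fitting; a standalone round-trip sketch:

import numpy as np

powers_db = np.array([-30.0, -20.0, -10.0])
linear = np.power(10, powers_db / 10.)
print(linear)                 # [0.001 0.01  0.1  ]
print(10 * np.log10(linear))  # back to [-30. -20. -10.]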
Project: CTAtools    Author: davidsanchez    | project source | file source
def Get3FGL(Cat,xdata,ydata,dydata):
    #create a spectrum for a given catalog and compute the model+butterfly
    # 3FGL CATALOG
    Cat.MakeSpectrum("3FGL",1e-4,0.3)
    enerbut,but,enerphi,phi = Cat.Plot("3FGL")

    # read DATA Point from 3FGL CATALOG
    em3FGL, ep3FGL, flux3FGL, dflux3FGL = Cat.GetDataPoints('3FGL')  # energy in TeV since the user asks for that in the call of Cat
    ener3FGL = numpy.sqrt(em3FGL*ep3FGL) 
    dem3FGL = ener3FGL-em3FGL
    dep3FGL = ep3FGL-ener3FGL
    c=Cat.ReadPL('3FGL')[3]
    e2dnde3FGL = (-c+1)*flux3FGL*numpy.power(ener3FGL*1e6,-c+2)/(numpy.power((ep3FGL*1e6),-c+1)-numpy.power((em3FGL*1e6),-c+1))*1.6e-6
    de2dnde3FGL = e2dnde3FGL*dflux3FGL/flux3FGL

    for i in range(len(ener3FGL)):
        xdata.append(numpy.log10(ener3FGL[i]))
        ydata.append(numpy.log10(e2dnde3FGL[i]))
        dydata.append(numpy.log10(de2dnde3FGL[i]))

    return enerbut,but,enerphi,phi,ener3FGL, e2dnde3FGL, dem3FGL, dep3FGL, de2dnde3FGL
Project: Tacotron_pytorch    Author: root20    | project source | file source
def spectrogram2wav(spectrogram, n_fft, win_length, hop_length, num_iters):
    '''
    spectrogram: [t, f], i.e. [t, nfft // 2 + 1]
    '''
    min_level_db = -100
    ref_level_db = 20

    spec = spectrogram.T
    # denormalize
    spec = (np.clip(spec, 0, 1) * - min_level_db) + min_level_db
    spec = spec + ref_level_db

    # Convert back to linear
    spec = np.power(10.0, spec * 0.05)

    return _griffin_lim(spec ** 1.5, n_fft, win_length, hop_length, num_iters)  # Reconstruct phase
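
The np.power(10.0, spec * 0.05) step is the usual dB-to-amplitude conversion, 10^(dB/20); a tiny standalone check:

import numpy as np

db = np.array([0.0, 20.0, 40.0])
print(np.power(10.0, db * 0.05))  # [  1.  10. 100.] -- every +20 dB multiplies amplitude by 10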
Project: Quantrade    Author: quant-trade    | project source | file source
def process_commissions(symbol, multiplied_symbols):
    try:
        symbol_ = Symbols.objects.filter(symbol=symbol).values('currency', 'spread', 'digits', 'tick_size', 'tick_value', 'broker', 'symbol')
        if settings.SHOW_DEBUG:
            print("Processing commisions for {}".format(symbol_))

        if any(symbol_[0]['symbol'] in s for s in multiplied_symbols):
            value = (((power(10.0, -symbol_[0]['digits']) * \
                float(symbol_[0]['spread'])) / float(symbol_[0]['tick_size'])) * \
                float(symbol_[0]['tick_value'])) * 100.0
        else:
            value = (((power(10.0, -symbol_[0]['digits']) * \
                float(symbol_[0]['spread'])) / float(symbol_[0]['tick_size'])) * \
                float(symbol_[0]['tick_value']))

        symbol.commission = value
        symbol.save()
    except Exception as err:
        print(colored.red("At process commissions {}".format(err)))
        symbol.commission = None
        symbol.save()
    if settings.SHOW_DEBUG:
        print("Updated commision value for {0}\n".format(symbol.symbol))
Project: focal-loss    Author: unsky    | project source | file source
def forward(self, is_train, req, in_data, out_data, aux):

        cls_score = in_data[0].asnumpy()
        labels = in_data[1].asnumpy()
        self._labels = labels

        pro_ = np.exp(cls_score - cls_score.max(axis=1).reshape((cls_score.shape[0], 1)))
        pro_ /= pro_.sum(axis=1).reshape((cls_score.shape[0], 1))
        # pro_ = mx.nd.SoftmaxActivation(cls_score) + 1e-14
        # pro_ = pro_.asnumpy()
        self.pro_ = pro_
        # restore pt for backward

        self._pt = pro_[np.arange(pro_.shape[0],dtype = 'int'), labels.astype('int')]

        # NOTE: the focal loss value is not used here; this layer forwards the class
        # probabilities, and the focal value itself is computed in metric.py (see the README).
        # focal loss, shape (batch_size, num_class):
        loss_ = -1 * np.power(1 - pro_, self._gamma) * np.log(pro_)
        print "---------------" 
        print 'pro:',pro_[1],labels[1]
        self.assign(out_data[0],req[0],mx.nd.array(pro_))
Project: DeepAnomaly    Author: adiyoss    | project source | file source
def build_data_auto_encoder(data, step, win_size):
    count = int(data.shape[1] / step)
    docX = np.zeros((count, 3, win_size))

    for i in range(0, data.shape[1] - win_size, step):
        c = i // step
        docX[c][0] = np.abs(data[0, i:i + win_size] - data[1, i:i + win_size])
        docX[c][1] = np.power(data[0, i:i + win_size] - data[1, i:i + win_size], 2)
        docX[c][2] = np.pad(
            (data[0, i:i + win_size - 1] - data[0, i + 1:i + win_size]) * (data[1, i:i + win_size - 1] - data[1, i + 1:i + win_size]),
            (0, 1), 'constant', constant_values=0)
    data = np.dstack((docX[:, 0], docX[:, 1], docX[:, 2])).reshape(docX.shape[0], docX.shape[1]*docX.shape[2])

    return data
Project: hippylib    Author: hippylib    | project source | file source
def __init__(self, prior, d, U):
        self.prior = prior        
        ones = np.ones( d.shape, dtype=d.dtype )        
        self.d = ones - np.power(ones + d, -.5)
        self.lrsqrt = LowRankOperator(self.d, U)
        self.help = Vector()
        self.init_vector(self.help, 0)
Project: hippylib    Author: hippylib    | project source | file source
def trace2(self,W=None):
        """
        Compute the trace of A*A (note this is the square of the Frobenius norm, since A is symmetric).
        If the weight W is provided, it will compute the trace of (AW)^2.

        This is equivalent to 
        tr_W(A) = \sum_i lambda_i^2,
        where lambda_i are the generalized eigenvalues of
        A x = lambda W^-1 x.

        Note if U is a W-orthogonal matrix then
        tr_W(A) = \sum_i D(i,i)^2. 
        """
        if W is None:
            UtU = np.dot(self.U.T, self.U)
            dUtU = self.d[:,None] * UtU #diag(d)*UtU.
            tr2 = np.sum(dUtU*dUtU)
        else:
            WU = np.zeros(self.U.shape, dtype=self.U.dtype)
            u, wu = Vector(), Vector()
            W.init_vector(u,1)
            W.init_vector(wu,0)
            for i in range(self.U.shape[1]):
                u.set_local(self.U[:,i])
                W.mult(u,wu)
                WU[:,i] = wu.get_local()
            UtWU = np.dot(self.U.T, WU)
            dUtWU = self.d[:,None] * UtWU #diag(d)*UtWU.
            tr2 = np.power(np.linalg.norm(dUtWU),2)

        return tr2
Project: MachineLearningProjects    Author: geallen    | project source | file source
def gaussian_2d(x, y, mx, my, cov):
    ''' x and y are the 2D coordinates to calculate the function value
        mx and my are the mean parameters in x and y axes
        cov is the 2x2 variance-covariance matrix'''
    ret = 0

    # ^^ YOUR CODE HERE ^^
    sigmax = np.sqrt(cov[0][0])
    sigmay = np.sqrt(cov[1][1])
    p = cov[0][1] / (sigmax * sigmay)
    norm = 1 / (2 * np.pi * sigmax * sigmay * np.sqrt(1 - np.power(p, 2)))
    z = (np.power(x - mx, 2) / np.power(sigmax, 2)
         + np.power(y - my, 2) / np.power(sigmay, 2)
         - 2 * p * (x - mx) * (y - my) / (sigmax * sigmay))
    ret = norm * np.exp(-z / (2 * (1 - np.power(p, 2))))

    return ret

## Finally, we compute the Gaussian function outputs for each entry in our mesh and plot the surface for each class.
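
A usage check of gaussian_2d, assuming the function and the numpy import above are in scope: at the mean with an identity covariance the density should equal 1/(2*pi):

print(gaussian_2d(0.0, 0.0, 0.0, 0.0, [[1.0, 0.0], [0.0, 1.0]]))  # ~0.15915 = 1/(2*pi)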
Project: vae-npvc    Author: JeremyCCHsu    | project source | file source
def pw2wav(features, feat_dim=513, fs=16000):
    ''' NOTE: Use `order='C'` to ensure Cython compatibility '''
    en = np.reshape(features['en'], [-1, 1])
    sp = np.power(10., features['sp'])
    sp = en * sp
    if isinstance(features, dict):
        return pw.synthesize(
            features['f0'].astype(np.float64).copy(order='C'),
            sp.astype(np.float64).copy(order='C'),
            features['ap'].astype(np.float64).copy(order='C'),
            fs,
        )
    features = features.astype(np.float64)
    sp = features[:, :feat_dim]
    ap = features[:, feat_dim:feat_dim*2]
    f0 = features[:, feat_dim*2]
    en = features[:, feat_dim*2 + 1]
    en = np.reshape(en, [-1, 1])
    sp = np.power(10., sp)
    sp = en * sp
    return pw.synthesize(
        f0.copy(order='C'),
        sp.copy(order='C'),
        ap.copy(order='C'),
        fs
    )
Project: sea-lion-counter    Author: rdinse    | project source | file source
def applyColorAugmentation(self, img, std=0.55, gamma=2.5):
    '''Applies random color augmentation following [1].  An additional gamma
    transformation is added.

    [1] Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton.  ImageNet
        Classification with Deep Convolutional Neural Networks.  NIPS 2012.
    '''

    alpha = np.clip(np.random.normal(0, std, size=3), -1.3 * std, 1.3 * std)
    perturbation = self.data_evecs.dot((alpha * np.sqrt(self.data_evals)).T)
    gamma = 1.0 - sum(perturbation) / gamma
    return np.power(np.clip(img + perturbation, 0., 1.), gamma)
    # return np.clip(img + perturbation, 0., 1.)  # unreachable alternative without the gamma step
Project: sea-lion-counter    Author: rdinse    | project source | file source
def applyColorAugmentation(img, std=0.5):
  '''Applies random color augmentation following [1].

  [1] Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton. \
    ImageNet Classification with Deep Convolutional Neural Networks. \
    NIPS 2012.'''

  alpha = np.clip(np.random.normal(0, std, size=3), -2 * std, 2. * std)
  perturbation = sld_evecs.dot((alpha * np.sqrt(sld_evals)).T)
  gamma = 1.0 - sum(perturbation) / 3.
  return np.power(np.clip(img + perturbation, 0., 1.), gamma)
  # return np.clip(img + perturbation, 0., 1.)  # unreachable alternative without the gamma step
Project: CLAM    Author: Xinglab    | project source | file source
def ztnb_pmf(y, mu, alpha):
    r = 1.0 / alpha
    if y <= 0:
        raise Exception('y must be larger than 0.')
    p = mu/(mu+r+0.0)
    ztnbin_mpmath = lambda y, p, r: (
        mpmath.gamma(y + r) / (mpmath.gamma(y + 1) * mpmath.gamma(r))
        * np.power(1 - p, r) * np.power(p, y) / (1 - np.power(1 - p, r))
    )
    ztnbin = np.frompyfunc(ztnbin_mpmath, 3, 1)
    return float(ztnbin(y, p, r))
Project: CLAM    Author: Xinglab    | project source | file source
def ztnb_cdf(y, mu, alpha):
    r = 1.0/alpha
    if y <= 0:
        raise Exception('y must be larger than 0.')
    p = mu/(mu+r+0.0)
    F_ztnb = ( 1 - special.btdtr(y+1, r, p) - np.power(1-p, r) ) / (1-np.power(1-p,r))
    return F_ztnb
Project: CLAM    Author: Xinglab    | project source | file source
def expected_zeros(pseudo_size, mu, alpha):
    min_allowed_alpha=10**-4
    max_allowed_prob_zero=0.99
    if alpha < min_allowed_alpha:
        prob_zero = max_allowed_prob_zero
    else:
        prob_zero = np.min([np.power(1.0+alpha*mu, -1.0/alpha), 0.99])
    expected_zeros = int(pseudo_size*(prob_zero/(1-prob_zero)))
    return expected_zeros
Project: NumpyDL    Author: oujago    | project source | file source
def derivative(self, input=None):
        """Backward propagation.

        Returns
        -------
        float32 
            The derivative of the Elliot function.
        """
        last_forward = 1 + np.abs(input * self.steepness) if input is not None else self.last_forward
        return 0.5 * self.steepness / np.power(last_forward, 2)


# elliot-end
# symmetric-elliot-start
Project: NumpyDL    Author: oujago    | project source | file source
def derivative(self, input=None):
        """Backward propagation.

        Returns
        -------
        float32 
            The derivative of the SymmetricElliot function.
        """
        last_forward = 1 + np.abs(input * self.steepness) if input is not None else self.last_forward
        return self.steepness / np.power(last_forward, 2)


# symmetric-elliot-end
# softplus-start