Python numpy module: exp() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.exp().
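
As a quick refresher before the examples: numpy.exp computes the element-wise exponential e**x of its input, and accepts scalars, arrays, and complex values alike. A minimal sketch:

import numpy as np

x = np.array([0.0, 1.0, -1.0])
print(np.exp(x))           # [1.         2.71828183 0.36787944]
print(np.exp(1j * np.pi))  # complex input is supported: (-1+1.2246468e-16j)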

Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project Source | File Source
def svgd_kernel(self, h = -1):
        sq_dist = pdist(self.theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, use the median trick to choose the bandwidth
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(self.theta.shape[0]+1))

        # compute the RBF kernel
        Kxy = np.exp(-pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, self.theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(self.theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
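
For reference, the same median-trick RBF kernel can be sketched standalone (variable names here are illustrative, not taken from the project):

import numpy as np
from scipy.spatial.distance import pdist, squareform

theta = np.random.randn(5, 2)                      # 5 particles in 2-D
pairwise_dists = squareform(pdist(theta))**2       # squared pairwise distances
h = np.median(pairwise_dists)
h = np.sqrt(0.5 * h / np.log(theta.shape[0] + 1))  # median-trick bandwidth
Kxy = np.exp(-pairwise_dists / h**2 / 2)           # RBF kernel matrix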
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project Source | File Source
def lr_grad_norm_avg(self):
    # Enforce that lr * grad_norm does not increase dramatically
    # in case of instability. Not necessary for basic use.
    global_state = self._global_state
    beta = self._beta
    if "lr_grad_norm_avg" not in global_state:
      global_state['grad_norm_squared_avg_log'] = 0.0
    global_state['grad_norm_squared_avg_log'] = \
      global_state['grad_norm_squared_avg_log'] * beta \
      + (1 - beta) * np.log(global_state['grad_norm_squared'] + eps)
    if "lr_grad_norm_avg" not in global_state:
      global_state["lr_grad_norm_avg"] = \
        0.0 * beta + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      # we monitor the minimal smoothed ||lr * grad||
      global_state["lr_grad_norm_avg_min"] = \
        np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() )
    else:
      global_state["lr_grad_norm_avg"] = global_state["lr_grad_norm_avg"] * beta \
        + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      global_state["lr_grad_norm_avg_min"] = \
        min(global_state["lr_grad_norm_avg_min"], 
            np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() ) )
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project Source | File Source
def update_hyper_param(self):
    for group in self._optimizer.param_groups:
      group['momentum'] = self._mu_t
      #group['momentum'] = max(self._mu, self._mu_t)
      if not self._force_non_inc_step:
        group['lr'] = self._lr_t * self._lr_factor
        # a loose clamp to prevent a catastrophically large move: if the
        # proposed move is too large, cap lr so that lr * grad_norm stays at the threshold
        if self._adapt_clip and (group['lr'] * np.sqrt(self._global_state['grad_norm_squared']) >= self._catastrophic_move_thresh):
          group['lr'] = self._catastrophic_move_thresh / np.sqrt(self._global_state['grad_norm_squared'] + eps)
          if self._verbose:
            logging.warning("clip catastrophic move!")
      elif self._iter > self._curv_win_width:
        # force lr * grad_norm not to increase dramatically.
        # Not necessary for basic use. Please refer to the comments
        # in YFOptimizer.__init__ for more details
        self.lr_grad_norm_avg()
        debias_factor = self.zero_debias_factor()
        group['lr'] = min(self._lr * self._lr_factor,
          2.0 * self._global_state["lr_grad_norm_avg_min"] \
          / (np.sqrt(np.exp(self._global_state['grad_norm_squared_avg_log'] / debias_factor) ) + eps) )
    return
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project Source | File Source
def lr_grad_norm_avg(self):
    # Enforce that lr * grad_norm does not increase dramatically
    # in case of instability. Not necessary for basic use.
    global_state = self._global_state
    beta = self._beta
    if "lr_grad_norm_avg" not in global_state:
      global_state['grad_norm_squared_avg_log'] = 0.0
    global_state['grad_norm_squared_avg_log'] = \
      global_state['grad_norm_squared_avg_log'] * beta \
      + (1 - beta) * np.log(global_state['grad_norm_squared'] + eps)
    if "lr_grad_norm_avg" not in global_state:
      global_state["lr_grad_norm_avg"] = \
        0.0 * beta + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      # we monitor the minimal smoothed ||lr * grad||
      global_state["lr_grad_norm_avg_min"] = \
        np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() )
    else:
      global_state["lr_grad_norm_avg"] = global_state["lr_grad_norm_avg"] * beta \
        + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      global_state["lr_grad_norm_avg_min"] = \
        min(global_state["lr_grad_norm_avg_min"], 
            np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() ) )
Project: treecat    Author: posterior    | Project Source | File Source
def test_quantize_from_probs2(size, resolution):
    set_random_seed(make_seed(size, resolution))
    probs = np.exp(np.random.random(size)).astype(np.float32)
    probs2 = probs.reshape((1, size))
    quantized = quantize_from_probs2(probs2, resolution)
    assert quantized.shape == probs2.shape
    assert quantized.dtype == np.int8
    assert np.all(quantized.sum(axis=1) == resolution)

    # Check that quantized result is closer to target than any other value.
    quantized = quantized.reshape((size, ))
    target = resolution * probs / probs.sum()
    distance = np.abs(quantized - target).sum()
    for combo in itertools.combinations(range(size), resolution):
        other = np.zeros(size, np.int8)
        for i in combo:
            other[i] += 1
        assert other.sum() == resolution
        other_distance = np.abs(other - target).sum()
        assert distance <= other_distance
Project: treecat    Author: posterior    | Project Source | File Source
def observed_perplexity(self, counts):
        """Compute perplexity = exp(entropy) of observed variables.

        Perplexity is an information theoretic measure of the number of
        clusters or observed classes. Perplexity is a real number in the range
        [1, dim[v]], where dim[v] is the number of categories in an observed
        categorical variable or 2 for an ordinal variable.

        Args:
            counts: A [V]-shaped array of multinomial counts.

        Returns:
            A [V]-shaped numpy array of perplexity.
        """
        result = self._ensemble[0].observed_perplexity(counts)
        for server in self._ensemble[1:]:
            result += server.observed_perplexity(counts)
        result /= len(self._ensemble)
        return result
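
To make the perplexity = exp(entropy) relationship concrete, here is a hand-rolled check (illustrative only, not part of treecat):

import numpy as np

probs = np.array([0.5, 0.25, 0.25])
entropy = -np.sum(probs * np.log(probs))
perplexity = np.exp(entropy)   # ~2.83, within [1, 3] for 3 categories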
Project: treecat    Author: posterior    | Project Source | File Source
def latent_correlation(self):
        """Compute correlation matrix among latent features.

        This computes the generalization of Pearson's correlation to discrete
        data. Let I(X;Y) be the mutual information. Then define correlation as

          rho(X,Y) = sqrt(1 - exp(-2 I(X;Y)))

        Returns:
            A [V, V]-shaped numpy array of feature-feature correlations.
        """
        result = self._ensemble[0].latent_correlation()
        for server in self._ensemble[1:]:
            result += server.latent_correlation()
        result /= len(self._ensemble)
        return result
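
A quick numeric check of the formula above (illustrative only): rho(X,Y) is 0 for independent variables and approaches 1 as the mutual information grows.

import numpy as np

for mi in [0.0, 0.5, 2.0]:
    rho = np.sqrt(1 - np.exp(-2 * mi))
    print(mi, rho)   # 0.0 -> 0.000, 0.5 -> ~0.795, 2.0 -> ~0.991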
Project: Modeling_Preparation    Author: Yangruipis    | Project Source | File Source
def begin(self):
        x = random.randint(self.x_range[0], self.x_range[1])
        f = self.func(x)
        T = self.T0
        while T > self.T_min:
            for i in range(self.K):
                new_x = self.gen_new_x(x, T)
                f_x = self.func(new_x)
                delta_E = f_x - f
                # accept a better solution unconditionally
                if delta_E < 0:
                    f = f_x
                    x = new_x
                    break
                else:
                    #p_k = 1.0 / (1 + np.exp(- delta_E / self.func(T)))
                    p_k = np.exp(- delta_E / T)
                    if random.random() < p_k:
                        f = f_x
                        x = new_x
                        break
            T *= self.delta

        return x
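
The acceptance rule above is the Metropolis criterion: a worse candidate (delta_E > 0) is accepted with probability p_k = exp(-delta_E / T), so acceptance becomes rarer as the temperature cools. A small illustration with made-up numbers:

import numpy as np

delta_E = 1.0
for T in [10.0, 1.0, 0.1]:
    print(T, np.exp(-delta_E / T))   # 0.905..., 0.368..., ~4.5e-05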
Project: MKLMM    Author: omerwe    | Project Source | File Source
def optSigma2(self, U, s, y, covars, logdetXX, reml, ldeltamin=-5, ldeltamax=5):

        #Prepare required matrices
        Uy = U.T.dot(y).flatten()
        UX = U.T.dot(covars)        

        if (U.shape[1] < U.shape[0]):
            UUX = covars - U.dot(UX)
            UUy = y - U.dot(Uy)
            UUXUUX = UUX.T.dot(UUX)
            UUXUUy = UUX.T.dot(UUy)
            UUyUUy = UUy.T.dot(UUy)
        else: UUXUUX, UUXUUy, UUyUUy = None, None, None
        numIndividuals = U.shape[0]
        ldeltaopt_glob = optimize.minimize_scalar(self.negLLevalLong, bounds=(ldeltamin, ldeltamax), method='Bounded', args=(s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, numIndividuals, reml)).x

        ll, sig2g, beta, r2 = self.negLLevalLong(ldeltaopt_glob, s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, numIndividuals, reml, returnAllParams=True)
        sig2e = np.exp(ldeltaopt_glob) * sig2g

        return sig2g, sig2e, beta, ll
Project: MKLMM    Author: omerwe    | Project Source | File Source
def deriveKernel(self, params, i):
        self.checkParamsI(params, i)
        c = np.exp(params[-1])

        #K = self.kernel.getTrainKernel(params[:-1])
        #deriv1 = self.polyDegree * (c+K)**(self.polyDegree-1)

        K = self.kernel.getTrainKernel(params[:-1]) + c     
        if (self.polyDegree==2): Kpower=K
        else:
            Kpower = K**2
            for _ in range(self.polyDegree-3): Kpower *= K               #this is faster than direct power
        deriv1 = self.polyDegree * Kpower

        if (i==params.shape[0]-1): K_deriv = deriv1*c
        else:      K_deriv = deriv1 * self.kernel.deriveKernel(params[:-1], i)
        return K_deriv
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getTrainKernel(self, params):
        self.checkParams(params)
        if (self.sameParams(params)): return self.cache['getTrainKernel']

        ell = np.exp(params[0])
        if (self.K_sq is None): K = sq_dist(self.X_scaled.T / ell)  #precompute squared distances
        else: K = self.K_sq / ell**2        
        self.cache['K_sq_scaled'] = K

        # # # #manual computation (just for sanity checks)
        # # # K1 = np.exp(-K / 2.0)
        # # # K2 = np.zeros((self.X_scaled.shape[0], self.X_scaled.shape[0]))
        # # # for i1 in xrange(self.X_scaled.shape[0]):
            # # # for i2 in xrange(i1, self.X_scaled.shape[0]):
                # # # diff = self.X_scaled[i1,:] - self.X_scaled[i2,:]
                # # # K2[i1, i2] = np.exp(-np.sum(diff**2) / (2*ell))
                # # # K2[i2, i1] = K2[i1, i2]               
        # # # print np.max((K1-K2)**2)
        # # # sys.exit(0)

        K_exp = np.exp(-K / 2.0)
        self.cache['getTrainKernel'] = K_exp
        self.saveParams(params)
        return K_exp
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)
        ell = np.exp(params[0])
        p = np.exp(params[1])

        Xtest_scaled = Xtest/np.sqrt(Xtest.shape[1])
        d2 = sq_dist(self.X_scaled.T/ell, Xtest_scaled.T/ell)   #precompute squared distances

        #compute dp
        dp = np.zeros(d2.shape)
        for d in range(self.X_scaled.shape[1]):
            dp += (np.outer(self.X_scaled[:,d], np.ones((1, Xtest_scaled.shape[0]))) - np.outer(np.ones((self.X_scaled.shape[0], 1)), Xtest_scaled[:,d]))
        dp /= p

        K = np.exp(-d2 / 2.0)
        return np.cos(2*np.pi*dp)*K
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getTrainKernel(self, params):
        self.checkParams(params)
        if (self.sameParams(params)): return self.cache['getTrainKernel']
        if ('K_sq_scaled' not in self.cache): self.cache['K_sq_scaled'] = [None for _ in range(self.getNumParams())]

        ell = np.exp(params)
        K = 0
        for i in range(self.getNumParams()):
            if (self.sameParams(params, i)): K += self.cache['K_sq_scaled'][i]
            else:
                self.cache['K_sq_scaled'][i] = self.K_sq[i] / ell[i]**2
                K += self.cache['K_sq_scaled'][i]
        K_exp = np.exp(-K / 2.0)
        self.cache['getTrainKernel'] = K_exp
        self.saveParams(params)
        return K_exp
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)        
        params_kernels = params[len(self.kernels):]

        #compute Kd and EE
        Kd = np.zeros((self.n, Xtest[0].shape[0], len(self.kernels)))
        params_ind = 0
        kernel_paramsArr = params[len(self.kernels):]
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            kernelParams_range = np.arange(params_ind, params_ind+numHyp, dtype=int)
            kernel_params = kernel_paramsArr[kernelParams_range]            
            Kd[:,:,k_i] = k.getTrainTestKernel(kernel_params, Xtest[k_i])
            params_ind += numHyp
        EE = elsympol(Kd, len(self.kernels))

        #compute K
        K = 0
        for i in range(len(self.kernels)): K += np.exp(2*params[i]) * EE[:,:,i+1]

        return K
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)        
        params_kernels = params[len(self.kernels):]

        #compute Kd and EE
        Kd = np.zeros((Xtest[0].shape[0], 1, len(self.kernels)))
        params_ind = 0
        kernel_paramsArr = params[len(self.kernels):]
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            kernelParams_range = np.arange(params_ind, params_ind+numHyp, dtype=int)
            kernel_params = kernel_paramsArr[kernelParams_range]            
            Kd[:,0,k_i] = k.getTestKernelDiag(kernel_params, Xtest[k_i])
            params_ind += numHyp
        EE = elsympol(Kd, len(self.kernels))

        #compute K
        K = 0
        for i in range(len(self.kernels)): K += np.exp(2*params[i]) * EE[:,:,i+1]
        return K
Project: MKLMM    Author: omerwe    | Project Source | File Source
def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)
        ell2 = np.exp(2*params[0])

        z = Xtest / np.sqrt(Xtest.shape[1])
        S = 1 + self.X_scaled.dot(z.T)
        sz = 1 + np.sum(z**2, axis=1)
        sqrtEll2Psx = np.sqrt(ell2+self.sx)
        sqrtEll2Psz = np.sqrt(ell2+sz)
        K = S / np.outer(sqrtEll2Psx, sqrtEll2Psz)
        return np.arcsin(K)
Project: human-rl    Author: gsastry    | Project Source | File Source
def sample_hparams():
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log(start), np.log(end)  # natural log, so np.exp below inverts it

                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    choice = np.exp(choice)
                hparams[k] = choice
    return hparams
Project: human-rl    Author: gsastry    | Project Source | File Source
def gaussian_kernel(size, std=1.):
    size2 = 1 + 2 * size
    kernel = np.zeros((size2, size2))

    den = 2. * std * std

    for row in range(size2):
        for col in range(size2):
            x = row - size
            y = col - size

            kernel[row, col] = np.exp(-(x*x + y*y) / den)

    kernel /= kernel.sum()

    return kernel

# TODO: check out of bounds
Project: human-rl    Author: gsastry    | Project Source | File Source
def sample_hparams():
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log(start), np.log(end)  # natural log, so np.exp below inverts it

                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    choice = np.exp(choice)
                hparams[k] = choice
    return hparams
Project: human-rl    Author: gsastry    | Project Source | File Source
def gaussian_kernel(size, std=1.):
    size2 = 1 + 2 * size
    kernel = np.zeros((size2, size2))

    den = 2. * std * std

    for row in range(size2):
        for col in range(size2):
            x = row - size
            y = col - size

            kernel[row, col] = np.exp(-(x*x + y*y) / den)

    kernel /= kernel.sum()

    return kernel

# TODO: check out of bounds
Project: human-rl    Author: gsastry    | Project Source | File Source
def sample_hparams():
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log(start), np.log(end)  # natural log, so np.exp below inverts it

                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    choice = np.exp(choice)
                hparams[k] = choice
    return hparams
Project: human-rl    Author: gsastry    | Project Source | File Source
def sample_hparams():
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log(start), np.log(end)  # natural log, so np.exp below inverts it

                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    choice = np.exp(choice)
                hparams[k] = choice
    return hparams
Project: human-rl    Author: gsastry    | Project Source | File Source
def gaussian_kernel(size, std=1.):
    size2 = 1 + 2 * size
    kernel = np.zeros((size2, size2))

    den = 2. * std * std

    for row in range(size2):
        for col in range(size2):
            x = row - size
            y = col - size

            kernel[row, col] = np.exp(-(x*x + y*y) / den)

    kernel /= kernel.sum()

    return kernel

# TODO: check out of bounds
Project: human-rl    Author: gsastry    | Project Source | File Source
def sample_hparams():
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log(start), np.log(end)  # natural log, so np.exp below inverts it

                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    choice = np.exp(choice)
                hparams[k] = choice
    return hparams
Project: human-rl    Author: gsastry    | Project Source | File Source
def gaussian_kernel(size, std=1.):
    size2 = 1 + 2 * size
    kernel = np.zeros((size2, size2))

    den = 2. * std * std

    for row in range(size2):
        for col in range(size2):
            x = row - size
            y = col - size

            kernel[row, col] = np.exp(-(x*x + y*y) / den)

    kernel /= kernel.sum()

    return kernel

# TODO: check out of bounds
Project: pycpd    Author: siavashk    | Project Source | File Source
def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
      diff     = self.X - np.tile(self.TY[i, :], (self.N, 1))
      diff    = np.multiply(diff, diff)
      P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den==0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)
Project: galario    Author: mtazzari    | Project Source | File Source
def create_reference_image(size, x0=10., y0=-3., sigma_x=50., sigma_y=30., dtype='float64',
                           reverse_xaxis=False, correct_axes=True, sizey=None, **kwargs):
    """
    Creates a reference image: a Gaussian brightness profile with elliptical contours.
    """
    inc_cos = np.cos(0./180.*np.pi)

    delta_x = 1.
    x = (np.linspace(0., size - 1, size) - size / 2.) * delta_x

    if sizey:
        y = (np.linspace(0., sizey-1, sizey) - sizey/2.) * delta_x
    else:
        y = x.copy()

    if reverse_xaxis:
        xx, yy = np.meshgrid(-x, y/inc_cos)
    elif correct_axes:
        xx, yy = np.meshgrid(-x, -y/inc_cos)
    else:
        xx, yy = np.meshgrid(x, y/inc_cos)

    image = np.exp(-(xx-x0)**2./sigma_x - (yy-y0)**2./sigma_y)

    return image.astype(dtype)
Project: simple_rl    Author: david-abel    | Project Source | File Source
def get_action_distr(self, state, beta=0.2):
        '''
        Args:
            state (State)
            beta (float): Softmax temperature parameter.

        Returns:
            (list of floats): The i-th float corresponds to the probability
            mass associated with the i-th action (indexing into self.actions)
        '''
        all_q_vals = []
        for i in range(len(self.actions)):
            action = self.actions[i]
            all_q_vals.append(self.get_q_value(state, action))

        # Softmax distribution.
        total = sum([numpy.exp(beta * qv) for qv in all_q_vals])
        softmax = [numpy.exp(beta * qv) / total for qv in all_q_vals]

        return softmax
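
Note that summing numpy.exp(beta * qv) directly can overflow for large Q-values; a numerically safer variant subtracts the maximum before exponentiating (a sketch, not part of simple_rl):

import numpy as np

def softmax_stable(q_vals, beta=0.2):
    z = beta * np.asarray(q_vals, dtype=float)
    e = np.exp(z - z.max())   # shifting by the max avoids overflow
    return e / e.sum()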
Project: pycma    Author: CMA-ES    | Project Source | File Source
def monotoneTFosc(f):
    """Maps [-inf,inf] to [-inf,inf] with different constants
    for positive and negative part.

    """
    if np.isscalar(f):
        if f > 0.:
            f = np.log(f) / 0.1
            f = np.exp(f + 0.49 * (np.sin(f) + np.sin(0.79 * f))) ** 0.1
        elif f < 0.:
            f = np.log(-f) / 0.1
            f = -np.exp(f + 0.49 * (np.sin(0.55 * f) + np.sin(0.31 * f))) ** 0.1
        return f
    else:
        f = np.asarray(f)
        g = f.copy()
        idx = (f > 0)
        g[idx] = np.log(f[idx]) / 0.1
        g[idx] = np.exp(g[idx] + 0.49 * (np.sin(g[idx]) + np.sin(0.79 * g[idx])))**0.1
        idx = (f < 0)
        g[idx] = np.log(-f[idx]) / 0.1
        g[idx] = -np.exp(g[idx] + 0.49 * (np.sin(0.55 * g[idx]) + np.sin(0.31 * g[idx])))**0.1
        return g
Project: convolutional-pose-machines-tensorflow    Author: timctho    | Project Source | File Source
def make_gaussian(size, fwhm=3, center=None):
    """ Make a square gaussian kernel.
    size is the length of a side of the square
    fwhm is full-width-half-maximum, which
    can be thought of as an effective radius.
    """

    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]

    if center is None:
        x0 = y0 = size // 2
    else:
        x0 = center[0]
        y0 = center[1]

    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / 2.0 / fwhm / fwhm)
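
For example, make_gaussian(7, fwhm=3) returns a 7x7 array whose peak value 1.0 sits at the center cell [3, 3]:

heat = make_gaussian(7, fwhm=3)   # heat[3, 3] == 1.0, values decay radially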
Project: NumpyDL    Author: oujago    | Project Source | File Source
def forward(self, input, *args, **kwargs):
        """A sigmoid function is a mathematical function having a 
        characteristic "S"-shaped curve or sigmoid curve. Often, 
        sigmoid function refers to the special case of the logistic 
        function and defined by the formula :math:`\\varphi(x) = \\frac{1}{1 + e^{-x}}`
        (given the input :math:`x`).

        Parameters
        ----------
        input : float32
            The activation (the summed, weighted input of a neuron).

        Returns
        -------
        float32 in [0, 1]
            The output of the sigmoid function applied to the activation.
        """

        self.last_forward = 1.0 / (1.0 + np.exp(-input))
        return self.last_forward
Project: NumpyDL    Author: oujago    | Project Source | File Source
def forward(self, input):
        """:math:`\\varphi(\\mathbf{x})_j =
        \\frac{e^{\mathbf{x}_j}}{\sum_{k=1}^K e^{\mathbf{x}_k}}`
        where :math:`K` is the total number of neurons in the layer. This
        activation function gets applied row-wise.

        Parameters
        ----------
        input : float32
            The activation (the summed, weighted input of a neuron).

        Returns
        -------
        float32 where the sum of the row is 1 and each single value is in [0, 1]
            The output of the softmax function applied to the activation.
        """
        assert np.ndim(input) == 2
        self.last_forward = input
        x = input - np.max(input, axis=1, keepdims=True)
        exp_x = np.exp(x)
        s = exp_x / np.sum(exp_x, axis=1, keepdims=True)
        return s
Project: bayestsa    Author: thalesians    | Project Source | File Source
def __call__(self, params):
        print('???', params)
        sd1 = params[0]
        sd2 = params[1]
        cor = params[2]

        if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
            return np.inf

        bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
        bandwidthdet = la.det(bandwidth)
        bandwidthinv = la.inv(bandwidth)

        diff = sample[self.__iidx] - sample[self.__jidx]
        temp = diff.dot(bandwidthinv.T)
        temp *= temp
        e = np.exp(np.sum(temp, axis=1))
        s = np.sum(e**(-.25) - 4 * e**(-.5))

        cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
        print('!!!', cost)
        return cost / 10000.
Project: mimic3-benchmarks    Author: YerevaNN    | Project Source | File Source
def step(self, mode):
        if mode == "train" and self.mode == "test":
            raise Exception("Cannot train during test mode")

        if mode == "train":
            theano_fn = self.train_fn
            batch_gen = self.train_batch_gen
        elif mode == "test":
            theano_fn = self.test_fn
            batch_gen = self.test_batch_gen
        else:
            raise Exception("Invalid mode")

        data = next(batch_gen)
        ys = data[-1]
        data = data[:-1]
        ret = theano_fn(*data)

        return {"prediction": np.exp(ret[0]) - 1,
                "answers": ys,
                "current_loss": ret[1],
                "loss_reg": ret[2],
                "loss_mse": ret[1] - ret[2],
                "log": ""}
Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project Source | File Source
def evaluation(self, X_test, y_test):
        # normalization
        X_test = self.normalization(X_test)

        # average over the output
        pred_y_test = np.zeros([self.M, len(y_test)])
        prob = np.zeros([self.M, len(y_test)])

        '''
            Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood
        '''
        for i in range(self.M):
            w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
            pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
            prob[i, :] = np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma) )
        pred = np.mean(pred_y_test, axis=0)

        # evaluation
        svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
        svgd_ll = np.mean(np.log(np.mean(prob, axis = 0)))

        return (svgd_rmse, svgd_ll)
Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project Source | File Source
def svgd_kernel(self, theta, h = -1):
        sq_dist = pdist(theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, use the median trick to choose the bandwidth
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(theta.shape[0]+1))

        # compute the RBF kernel
        Kxy = np.exp(-pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def _linear_phase(self, n_shift):
        """
        Private: Select the center of FOV
        """
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.st['Nd']) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases in all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    calculate image space scaling factor
    '''
#     import types
#     if alpha is types.ComplexType:
    alpha = numpy.real(alpha)
#         print('complex alpha may not work, but I just let it as')

    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
    return sn
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def linear_phase(self, n_shift):
        '''
        Select the center of FOV
        '''
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.st['Nd']) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases in all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
        # multiply the diagonal, linear phase before the gridding matrix
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    Calculate image space scaling factor
    '''
#     import types
#     if alpha is types.ComplexType:
    alpha = numpy.real(alpha)
#         print('complex alpha may not work, but I just let it as')

    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1)))
    return sn
Project: pynufft    Author: jyhmiinlin    | Project Source | File Source
def _linear_phase(self, n_shift):
        """
        Private: Select the center of FOV
        """
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.st['Nd']) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases in all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
Project: STA141C    Author: clarkfitzg    | Project Source | File Source
def f(w, lamb):
    """
    Eq. (2) in problem 2

    Non-vectorized, slow
    """
    total = 0
    nrows = X.shape[0]
    for i in range(nrows):
        current = 1 + np.exp(-y[i] * X[i, :].dot(w))
        total += np.log(current)
    total += (lamb / 2) * w.dot(w)
    return total
Project: STA141C    Author: clarkfitzg    | Project Source | File Source
def f2(w, lamb):
    """
    Eq. (2) in problem 2

    Vectorized (no explicit loops), fast
    """
    yxTw = y * X.dot(w)
    firstpart = np.log(1 + np.exp(-yxTw))
    total = firstpart.sum()
    total += (lamb / 2) * w.dot(w)
    return total
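
One caveat: for large negative yxTw, np.exp(-yxTw) overflows before the log is taken; numpy.logaddexp computes log(1 + exp(-z)) stably. A sketch of that variant (f2_stable is a hypothetical helper, not from the assignment code):

import numpy as np

def f2_stable(w, lamb, X, y):
    yxTw = y * X.dot(w)
    total = np.logaddexp(0.0, -yxTw).sum()   # stable log(1 + exp(-yxTw))
    total += (lamb / 2) * w.dot(w)
    return total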
Project: AutoML5    Author: djajetic    | Project Source | File Source
def pac_metric (solution, prediction, task='binary.classification'):
    ''' Probabilistic Accuracy based on log_loss metric. 
    We assume the solution is in {0, 1} and prediction in [0, 1].
    Otherwise, run normalize_array.''' 
    debug_flag=False
    [sample_num, label_num] = solution.shape
    if label_num==1: task='binary.classification'
    eps = 1e-15
    the_log_loss = log_loss(solution, prediction, task)
    # Compute the base log loss (using the prior probabilities)    
    pos_num = 1.* sum(solution) # float conversion!
    frac_pos = pos_num / sample_num # prior proba of positive class
    the_base_log_loss = prior_log_loss(frac_pos, task)
    # Alternative computation of the same thing (slower)    
    # Should always return the same thing except in the multi-label case
    # For which the analytic solution makes more sense
    if debug_flag:
        base_prediction = np.empty(prediction.shape)
        for k in range(sample_num): base_prediction[k,:] = frac_pos
        base_log_loss = log_loss(solution, base_prediction, task)  
        diff = np.array(abs(the_base_log_loss-base_log_loss))
        if len(diff.shape)>0: diff=max(diff)
        if diff > 1e-10:
            print('Arrggh {} != {}'.format(the_base_log_loss,base_log_loss))
    # Exponentiate to turn into an accuracy-like score.
    # In the multi-label case, we need to average AFTER taking the exp 
    # because it is an NL operation
    pac = mvmean(np.exp(-the_log_loss)) 
    base_pac = mvmean(np.exp(-the_base_log_loss))
    # Normalize: 0 for random, 1 for perfect    
    score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
    return score
Project: lang-reps    Author: chaitanyamalaviya    | Project Source | File Source
def softmax(x):
    e_x = np.exp(x - np.max(x))
    out = e_x / e_x.sum()
    return out
Project: pyballd    Author: Yurlungur    | Project Source | File Source
def f(r,theta):
    out = np.sin(theta)*r*np.exp(-r/2.)
    out[-1] = 0
    return out
Project: pyballd    Author: Yurlungur    | Project Source | File Source
def dfdr(r,theta):
    out = np.sin(theta)*(np.exp(-r/2.) - (1./2.)*r*np.exp(-r/2.))
    out[-1] = 0
    return out
Project: pyballd    Author: Yurlungur    | Project Source | File Source
def dfdrdtheta(r,theta):
    out = np.cos(theta)*(np.exp(-r/2.) - (1./2.)*r*np.exp(-r/2.))
    out[-1] = 0
    return out
Project: topically-driven-language-model    Author: jhlau    | Project Source | File Source
def sample(self, probs, temperature):
        if temperature == 0:
            return np.argmax(probs)

        probs = probs.astype(np.float64) #convert to float64 for higher precision
        probs = np.log(probs) / temperature
        probs = np.exp(probs) / math.fsum(np.exp(probs))
        return np.argmax(np.random.multinomial(1, probs, 1))

    #generate a sentence given conv_hidden
Project: drl.pth    Author: seba-1511    | Project Source | File Source
def softmax(x):
    act = np.exp(x - np.max(x))
    return act / act.sum()