Python numpy module: numpy.pi example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how numpy.pi is used.

Project: MKLMM    Author: omerwe    | project source | file source
def deriveKernel(self, params, i):
        self.checkParamsI(params, i)
        ell = np.exp(params[0])
        p = np.exp(params[1])

        #compute d2
        if (self.K_sq is None): d2 = sq_dist(self.X_scaled.T / ell) #precompute squared distances
        else: d2 = self.K_sq / ell**2

        #compute dp
        dp = self.dp/p

        K = np.exp(-d2 / 2.0)
        if (i==0): return d2*K*np.cos(2*np.pi*dp)
        elif (i==1): return 2*np.pi*dp*np.sin(2*np.pi*dp)*K
        else: raise Exception('invalid parameter index:' + str(i))
Project: GELUs    Author: hendrycks    | project source | file source
def gelu(x):
    return 0.5 * x * (1 + T.tanh(T.sqrt(2 / np.pi) * (x + 0.044715 * T.pow(x, 3))))
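For reference, here is a standalone NumPy/SciPy sketch (not part of the GELUs project) comparing this tanh-based approximation with the exact erf-based GELU; the two agree to well under 1e-2 over typical input ranges.

import numpy as np
from scipy.special import erf

def gelu_exact(x):
    # exact GELU: x * Phi(x), where Phi is the standard normal CDF
    return 0.5 * x * (1 + erf(x / np.sqrt(2)))

def gelu_tanh(x):
    # tanh approximation used above; sqrt(2/pi) is where np.pi enters
    return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))

x = np.linspace(-3, 3, 601)
print(np.max(np.abs(gelu_exact(x) - gelu_tanh(x))))  # small, well below 1e-2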
Project: mpiFFT4py    Author: spectralDNS    | project source | file source
def get_local_wavenumbermesh(self, scaled=True, broadcast=False,
                                 eliminate_highest_freq=False):
        kx = fftfreq(self.N[0], 1./self.N[0])
        ky = rfftfreq(self.N[1], 1./self.N[1])
        if eliminate_highest_freq:
            for i, k in enumerate((kx, ky)):
                if self.N[i] % 2 == 0:
                    k[self.N[i]//2] = 0

        Ks = np.meshgrid(kx, ky[self.rank*self.Np[1]//2:(self.rank*self.Np[1]//2+self.Npf)], indexing='ij', sparse=True)
        if scaled is True:
            Lp = 2*np.pi/self.L
            Ks[0] *= Lp[0]
            Ks[1] *= Lp[1]
        K = Ks
        if broadcast is True:
            K = [np.broadcast_to(k, self.complex_shape()) for k in Ks]
        return K
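The scaled branch multiplies the integer FFT wavenumbers by Lp = 2*np.pi/L to turn them into physical wavenumbers for a periodic domain of length L. A one-dimensional illustration of that scaling (standalone, not using mpiFFT4py):

import numpy as np

N, L = 8, 2 * np.pi
k = np.fft.fftfreq(N, 1.0 / N)   # integer wavenumbers: 0, 1, 2, 3, -4, -3, -2, -1
print(k * 2 * np.pi / L)         # physical wavenumbers for a periodic domain of length L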
Project: Modeling_Preparation    Author: Yangruipis    | project source | file source
def _generate_data():
    """
    Generate data
    Use u(k-1) and y(k-1) to generate y(k)
    """
    # u = np.random.uniform(-1,1,200)
    # y=[]
    # former_y_value = 0
    # for i in np.arange(0,200):
    #     y.append(former_y_value)
    #     next_y_value = (29.0 / 40) * np.sin(
    #         (16.0 * u[i] + 8 * former_y_value) / (3.0 + 4.0 * (u[i] ** 2) + 4 * (former_y_value ** 2))) \
    #                    + (2.0 / 10) * u[i] + (2.0 / 10) * former_y_value
    #     former_y_value = next_y_value
    # return u,y
    u1 = np.random.uniform(-np.pi,np.pi,200)
    u2 = np.random.uniform(-1,1,200)
    y = np.zeros(200)
    for i in range(200):
        value = np.sin(u1[i]) + u2[i]
        y[i] =  value
    return u1, u2, y
Project: GELUs    Author: hendrycks    | project source | file source
def ae(x):
    if nonlinearity_name == 'relu':
        f = tf.nn.relu
    elif nonlinearity_name == 'elu':
        f = tf.nn.elu
    elif nonlinearity_name == 'gelu':
        # def gelu(x):
        #     return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
        # f = gelu
        def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        f = gelu_fast
    elif nonlinearity_name == 'silu':
        def silu(_x):
            return _x * tf.sigmoid(_x)
        f = silu
    # elif nonlinearity_name == 'soi':
    #     def soi_map(x):
    #         u = tf.random_uniform(tf.shape(x))
    #         mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
    #         return tf.cond(is_training, lambda: tf.mul(mask, x),
    #                        lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
    #     f = soi_map

    else:
        raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")

    h1 = f(tf.matmul(x, W['1']) + b['1'])
    h2 = f(tf.matmul(h1, W['2']) + b['2'])
    h3 = f(tf.matmul(h2, W['3']) + b['3'])
    h4 = f(tf.matmul(h3, W['4']) + b['4'])
    h5 = f(tf.matmul(h4, W['5']) + b['5'])
    h6 = f(tf.matmul(h5, W['6']) + b['6'])
    h7 = f(tf.matmul(h6, W['7']) + b['7'])
    return tf.matmul(h7, W['8']) + b['8']
Project: spyking-circus    Author: spyking-circus    | project source | file source
def score_samples(self, X):
        """Return the log-likelihood of each sample
        See. "Pattern Recognition and Machine Learning"
        by C. Bishop, 12.2.1 p. 574
        or http://www.miketipping.com/papers/met-mppca.pdf
        Parameters
        ----------
        X: array, shape(n_samples, n_features)
            The data.
        Returns
        -------
        ll: array, shape (n_samples,)
            Log-likelihood of each sample under the current model
        """
        check_is_fitted(self, 'mean_')

        X = check_array(X)
        Xr = X - self.mean_
        n_features = X.shape[1]
        log_like = np.zeros(X.shape[0])
        precision = self.get_precision()
        log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
        log_like -= .5 * (n_features * log(2. * np.pi)
                          - fast_logdet(precision))
        return log_like
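The log(2. * np.pi) term comes from the standard multivariate Gaussian log-density, log N(x; mu, Sigma) = -0.5 * [(x-mu)^T Sigma^{-1} (x-mu) + d*log(2*pi) - log|Sigma^{-1}|]. A self-contained NumPy sketch of that formula (names are illustrative, not from spyking-circus):

import numpy as np

def gaussian_log_likelihood(X, mean, precision):
    # per-sample log N(x; mean, Sigma), with precision = inv(Sigma)
    Xr = X - mean
    n_features = X.shape[1]
    quad = (Xr * (Xr @ precision)).sum(axis=1)
    _, logdet_prec = np.linalg.slogdet(precision)
    return -0.5 * (quad + n_features * np.log(2.0 * np.pi) - logdet_prec)

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))
print(gaussian_log_likelihood(X, X.mean(axis=0), np.eye(3)))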
Project: MKLMM    Author: omerwe    | project source | file source
def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)
        ell = np.exp(params[0])
        p = np.exp(params[1])

        Xtest_scaled = Xtest/np.sqrt(Xtest.shape[1])
        d2 = sq_dist(self.X_scaled.T/ell, Xtest_scaled.T/ell)   #precompute squared distances

        #compute dp
        dp = np.zeros(d2.shape)
        for d in xrange(self.X_scaled.shape[1]):
            dp += (np.outer(self.X_scaled[:,d], np.ones((1, Xtest_scaled.shape[0]))) - np.outer(np.ones((self.X_scaled.shape[0], 1)), Xtest_scaled[:,d]))
        dp /= p

        K = np.exp(-d2 / 2.0)
        return np.cos(2*np.pi*dp)*K
Project: hip-mdp-public    Author: dtak    | project source | file source
def reset(self,random_start_state=False, assign_state = False, n=None, k = None, \
        perturb_params = False, p_LINK_LENGTH_1 = 0, p_LINK_LENGTH_2 = 0, \
        p_LINK_MASS_1 = 0, p_LINK_MASS_2 = 0, **kw):
        self.t = 0
        self.state = np.random.uniform(low=-0.1,high=0.1,size=(4,))

        self.LINK_LENGTH_1 = 1.  # [m]
        self.LINK_LENGTH_2 = 1.  # [m]
        self.LINK_MASS_1 = 1.  #: [kg] mass of link 1
        self.LINK_MASS_2 = 1.

        if perturb_params:
            self.LINK_LENGTH_1 += (self.LINK_LENGTH_1 * p_LINK_LENGTH_1)  # [m]
            self.LINK_LENGTH_2 += (self.LINK_LENGTH_2 * p_LINK_LENGTH_2)  # [m]
            self.LINK_MASS_1 += (self.LINK_MASS_1 * p_LINK_MASS_1)  #: [kg] mass of link 1
            self.LINK_MASS_2 += (self.LINK_MASS_2 * p_LINK_MASS_2)  #: [kg] mass of link 2

        # The idea here is that we can initialize our batch randomly so that we can get
        # more variety in the state space that we attempt to fit a policy to.
        if random_start_state:
            self.state[:2] = np.random.uniform(-np.pi,np.pi,size=2)

        if assign_state:
            self.state[0] = wrap((2*k*np.pi)/(1.0*n),-np.pi,np.pi)
Project: hip-mdp-public    Author: dtak    | project source | file source
def calc_reward(self, action = None, state = None , **kw ):
        '''Calculates the continuous reward based on the height of the foot (y position) 
        with a penalty applied if the hinge is moving (we want the acrobot to be upright
        and stationary!), which is then normalized by the combined lengths of the links'''
        t = self.target
        if state is None:
            s = self.state
        else:
            s = state
            # Make sure that input state is clipped/wrapped to the given bounds (not guaranteed when coming from the BNN)
            s[0] = wrap( s[0] , -np.pi , np.pi )
            s[1] = wrap( s[1] , -np.pi , np.pi )
            s[2] = bound( s[2] , -self.MAX_VEL_1 , self.MAX_VEL_1 )
            s[3] = bound( s[3] , -self.MAX_VEL_1 , self.MAX_VEL_1 )

        hinge, foot = self.get_cartesian_points(s)
        reward = -0.05 * (foot[0] - self.LINK_LENGTH_1)**2

        terminal = self.is_terminal(s)
        return 10 if terminal else reward
Project: pycpd    Author: siavashk    | project source | file source
def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
      diff     = self.X - np.tile(self.TY[i, :], (self.N, 1))
      diff    = np.multiply(diff, diff)
      P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den==0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)
Project: galario    Author: mtazzari    | project source | file source
def create_reference_image(size, x0=10., y0=-3., sigma_x=50., sigma_y=30., dtype='float64',
                           reverse_xaxis=False, correct_axes=True, sizey=None, **kwargs):
    """
    Creates a reference image: a Gaussian brightness distribution with an elliptical shape.
    """
    inc_cos = np.cos(0./180.*np.pi)

    delta_x = 1.
    x = (np.linspace(0., size - 1, size) - size / 2.) * delta_x

    if sizey:
        y = (np.linspace(0., sizey-1, sizey) - sizey/2.) * delta_x
    else:
        y = x.copy()

    if reverse_xaxis:
        xx, yy = np.meshgrid(-x, y/inc_cos)
    elif correct_axes:
        xx, yy = np.meshgrid(-x, -y/inc_cos)
    else:
        xx, yy = np.meshgrid(x, y/inc_cos)

    image = np.exp(-(xx-x0)**2./sigma_x - (yy-y0)**2./sigma_y)

    return image.astype(dtype)
Project: pointnet    Author: charlesq34    | project source | file source
def rotate_point_cloud(batch_data):
    """ Randomly rotate the point clouds to augument the dataset
        rotation is per shape based along up direction
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
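A quick sanity check of the convention used here (points multiplied on the right by the matrix, rotation about the y/up axis); this is a standalone sketch, not project code:

import numpy as np

angle = np.pi / 2                      # quarter turn about the up (y) axis
c, s = np.cos(angle), np.sin(angle)
R = np.array([[c, 0, s],
              [0, 1, 0],
              [-s, 0, c]])
p = np.array([[1.0, 0.0, 0.0]])
print(p @ R)                           # approximately [[0., 0., 1.]]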
Project: pointnet    Author: charlesq34    | project source | file source
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """ Rotate the point cloud along up direction with certain angle.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        #rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
Project: SpicePy    Author: giaccone    | project source | file source
def ac_solve(net):
    """

    :param net:
    :return:
    """

    net.conductance_matrix()
    net.dynamic_matrix()
    net.rhs_matrix()

    # frequency
    f = float(net.analysis[-1])

    # linear system definition
    net.x = spsolve(net.G + 1j * 2 * np.pi * f * net.C, net.rhs)
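The 1j * 2 * np.pi * f factor is the usual j*omega term of AC analysis. A standalone illustration of the same idea for a single capacitor (not using SpicePy; component values are arbitrary):

import numpy as np

f = 1e3                      # frequency in Hz
C = 1e-6                     # capacitance in F
omega = 2 * np.pi * f        # angular frequency
Zc = 1.0 / (1j * omega * C)  # capacitor impedance at f
print(abs(Zc))               # ~159.2 ohm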
Project: mazerunner    Author: lucasdavid    | project source | file source
def thinking(self):
        """Deliberate to avoid obstacles on the path."""
        if self.motion.moveIsActive():
            # Maneuver occurring. Let's finish it
            # before taking any other measure.
            pass

        elif not self.sensors['proximity'][0].imminent_collision:
            # Goes back to moving state.
            self.behavior_ = self.BEHAVIORS.moving

        elif all(s.imminent_collision for s in self.sensors['proximity']):
            # There's nothing left to be done, only flag this is a dead-end.
            self.behavior_ = self.BEHAVIORS.stuck

        else:
            peripheral_sensors = self.sensors['proximity'][1:]
            for maneuver, sensor in zip(range(1, 4), peripheral_sensors):
                if not sensor.imminent_collision:
                    # A sensor that indicates no obstacles were found.
                    # Move in that direction.
                    self.motion.post.moveTo(0, 0, np.pi / 2)
                    break

        return self
Project: sampleRNN_ICLR2017    Author: soroushmehr    | project source | file source
def gaussian_nll(x, mus, sigmas):
    """
    NLL for Multivariate Normal with diagonal covariance matrix
    See:
        wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function
    where \Sigma = diag(s_1^2,..., s_n^2).

    x, mus, sigmas all should have the same shape.
    sigmas (s_1,..., s_n) should be strictly positive.
    The output has the same shape as the inputs, but without the last dimension.
    """
    nll = lib.floatX(numpy.log(2. * numpy.pi))
    nll += 2. * T.log(sigmas)
    nll += ((x - mus) / sigmas) ** 2.
    nll = nll.sum(axis=-1)
    nll *= lib.floatX(0.5)
    return nll
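An equivalent NumPy version of the same diagonal-covariance NLL (an illustrative sketch, without Theano):

import numpy as np

def gaussian_nll_np(x, mus, sigmas):
    # negative log-likelihood of a diagonal-covariance Gaussian, summed over the last axis
    nll = np.log(2.0 * np.pi) + 2.0 * np.log(sigmas) + ((x - mus) / sigmas) ** 2
    return 0.5 * nll.sum(axis=-1)

x = np.zeros((4, 3))
print(gaussian_nll_np(x, np.zeros((4, 3)), np.ones((4, 3))))  # 1.5*log(2*pi) per row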
Project: pybot    Author: spillai    | project source | file source
def tsukuba_load_poses(fn): 
    """ 
    Retrieve poses
    X Y Z R P Y - > X -Y -Z R -P -Y

    np.deg2rad(p[3]),-np.deg2rad(p[4]),-np.deg2rad(p[5]),
        p[0]*.01,-p[1]*.01,-p[2]*.01, axes='sxyz') for p in P ]

    """ 
    P = np.loadtxt(os.path.expanduser(fn), dtype=np.float64, delimiter=',')
    return [ RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) * \
             RigidTransform.from_rpyxyz(
                 np.deg2rad(p[3]),np.deg2rad(p[4]),np.deg2rad(p[5]),
                 p[0]*.01,p[1]*.01,p[2]*.01, axes='sxyz') * \
             RigidTransform.from_rpyxyz(np.pi, 0, 0, 0, 0, 0) for p in P ]

    # return [ RigidTransform.from_rpyxyz(
    #     np.deg2rad(p[3]),-np.deg2rad(p[4]),-np.deg2rad(p[5]),
    #     p[0]*.01,-p[1]*.01,-p[2]*.01, axes='sxyz') for p in P ]
Project: a-nice-mc    Author: ermongroup    | project source | file source
def __call__(self, z):
        z1 = tf.reshape(tf.slice(z, [0, 0], [-1, 1]), [-1])
        z2 = tf.reshape(tf.slice(z, [0, 1], [-1, 1]), [-1])
        v1 = tf.sqrt((z1 - 5) * (z1 - 5) + z2 * z2) * 2
        v2 = tf.sqrt((z1 + 5) * (z1 + 5) + z2 * z2) * 2
        v3 = tf.sqrt((z1 - 2.5) * (z1 - 2.5) + (z2 - 2.5 * np.sqrt(3)) * (z2 - 2.5 * np.sqrt(3))) * 2
        v4 = tf.sqrt((z1 + 2.5) * (z1 + 2.5) + (z2 + 2.5 * np.sqrt(3)) * (z2 + 2.5 * np.sqrt(3))) * 2
        v5 = tf.sqrt((z1 - 2.5) * (z1 - 2.5) + (z2 + 2.5 * np.sqrt(3)) * (z2 + 2.5 * np.sqrt(3))) * 2
        v6 = tf.sqrt((z1 + 2.5) * (z1 + 2.5) + (z2 - 2.5 * np.sqrt(3)) * (z2 - 2.5 * np.sqrt(3))) * 2
        pdf1 = tf.exp(-0.5 * v1 * v1) / tf.sqrt(2 * np.pi * 0.25)
        pdf2 = tf.exp(-0.5 * v2 * v2) / tf.sqrt(2 * np.pi * 0.25)
        pdf3 = tf.exp(-0.5 * v3 * v3) / tf.sqrt(2 * np.pi * 0.25)
        pdf4 = tf.exp(-0.5 * v4 * v4) / tf.sqrt(2 * np.pi * 0.25)
        pdf5 = tf.exp(-0.5 * v5 * v5) / tf.sqrt(2 * np.pi * 0.25)
        pdf6 = tf.exp(-0.5 * v6 * v6) / tf.sqrt(2 * np.pi * 0.25)
        return -tf.log((pdf1 + pdf2 + pdf3 + pdf4 + pdf5 + pdf6) / 6)
Project: pycma    Author: CMA-ES    | project source | file source
def _evalfull(self, x):
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors

        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)

        # BOUNDARY HANDLING

        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt
        x = monotoneTFosc(x)
        idx = (x > 0)
        x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))
        x = self.arrscales * x

        # COMPUTATION core
        ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
        fval = self.noise(ftrue) # without noise

        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
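The core line is a shifted, transformed Rastrigin function; in its plain form it reads f(x) = 10*(d - sum cos(2*pi*x_i)) + sum x_i^2, with global minimum 0 at x = 0. A minimal standalone version (not the BBOB-transformed one above):

import numpy as np

def rastrigin(x):
    x = np.asarray(x, dtype=float)
    d = x.shape[-1]
    return 10 * (d - np.sum(np.cos(2 * np.pi * x), axis=-1)) + np.sum(x**2, axis=-1)

print(rastrigin(np.zeros(5)))   # 0.0 at the global optimum
print(rastrigin(np.ones(5)))    # 5.0, since cos(2*pi) = 1 and each x_i contributes 1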
Project: pycma    Author: CMA-ES    | project source | file source
def initwithsize(self, curshape, dim):
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (1. / self.condition ** .5) ** linspace(0, 1, dim) # CAVE?
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
            K = np.arange(0, 12)
            self.aK = np.reshape(0.5 ** K, (1, 12))
            self.bK = np.reshape(3. ** K, (1, 12))
            self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5)) # optimal value

        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
Project: ModernGL-Volume-Raycasting-Example    Author: ulricheck    | project source | file source
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.camera_center += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.camera_center - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.camera_fov * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.camera_center = self.camera_center + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
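The np.pi / 180. factor converts the field of view from degrees to radians; the visible width at the look-at distance is then 2*d*tan(fov/2). A small illustrative helper capturing just that step (not part of the project):

import numpy as np

def view_width_at(distance, fov_degrees):
    # approximate world-space width visible at a given distance for the given field of view
    return distance * 2.0 * np.tan(0.5 * fov_degrees * np.pi / 180.0)

print(view_width_at(10.0, 60.0))  # ~11.55 world units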
Project: psola    Author: jcreinhold    | project source | file source
def test_pitch_estimation(self):
        """
        test pitch estimation algo with contrived small example
        if pitch is within 5 Hz, then say its good (for this small example,
        since the algorithm wasn't made for this type of synthesized signal)
        """
        cfg = ExperimentConfig(pitch_strength_thresh=-np.inf)
        # the next 3 variables are in Hz
        tolerance = 5
        fs = 48000
        f = 150
        # create a sine wave of f Hz freq sampled at fs Hz
        x = np.sin(2*np.pi * f/fs * np.arange(2**10))
        # estimate the pitch, it should be close to f
        p, t, s = pest.pitch_estimation(x, fs, cfg)
        self.assertTrue(np.all(np.abs(p - f) < tolerance))
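The 2*np.pi * f/fs term is the per-sample phase increment of an f Hz tone sampled at fs Hz. A quick FFT-based cross-check (independent of psola) that such a signal peaks near f:

import numpy as np

fs, f = 48000, 150
x = np.sin(2 * np.pi * f / fs * np.arange(2**15))   # longer signal for finer FFT resolution
spectrum = np.abs(np.fft.rfft(x))
freqs = np.fft.rfftfreq(x.size, d=1.0 / fs)
print(freqs[np.argmax(spectrum)])                   # close to 150 Hz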
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def setFromQTransform(self, tr):
        p1 = Point(tr.map(0., 0.))
        p2 = Point(tr.map(1., 0.))
        p3 = Point(tr.map(0., 1.))

        dp2 = Point(p2-p1)
        dp3 = Point(p3-p1)

        ## detect flipped axes
        if dp2.angle(dp3) > 0:
            #da = 180
            da = 0
            sy = -1.0
        else:
            da = 0
            sy = 1.0

        self._state = {
            'pos': Point(p1),
            'scale': Point(dp2.length(), dp3.length() * sy),
            'angle': (np.arctan2(dp2[1], dp2[0]) * 180. / np.pi) + da
        }
        self.update()
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.opts['center'] - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
    """
    Construct a path outlining an arrow with the given dimensions.
    The arrow points in the -x direction with tip positioned at 0,0.
    If *tipAngle* is supplied (in degrees), it overrides *headWidth*.
    If *tailLen* is None, no tail will be drawn.
    """
    headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
    path = QtGui.QPainterPath()
    path.moveTo(0,0)
    path.lineTo(headLen, -headWidth)
    if tailLen is None:
        innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, 0)
    else:
        tailWidth *= 0.5
        innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, -tailWidth)
        path.lineTo(headLen + tailLen, -tailWidth)
        path.lineTo(headLen + tailLen, tailWidth)
        path.lineTo(innerY, tailWidth)
    path.lineTo(headLen, headWidth)
    path.lineTo(0,0)
    return path
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def setFromQTransform(self, tr):
        p1 = Point(tr.map(0., 0.))
        p2 = Point(tr.map(1., 0.))
        p3 = Point(tr.map(0., 1.))

        dp2 = Point(p2-p1)
        dp3 = Point(p3-p1)

        ## detect flipped axes
        if dp2.angle(dp3) > 0:
            #da = 180
            da = 0
            sy = -1.0
        else:
            da = 0
            sy = 1.0

        self._state = {
            'pos': Point(p1),
            'scale': Point(dp2.length(), dp3.length() * sy),
            'angle': (np.arctan2(dp2[1], dp2[0]) * 180. / np.pi) + da
        }
        self.update()
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.opts['center'] - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
Project: stcad    Author: feschmidt    | project source | file source
def make_wafer(self,wafer_r,frame,label,labelloc,labelwidth):
        """
        Generate wafer with primary flat on the left. From https://coresix.com/products/wafers/ I estimated the angle defining the wafer flat to be arctan((flat/2) / radius).
        """
        angled = 18
        angle = angled*np.pi/180
        circ = cad.shapes.Circle((0,0), wafer_r, width=self.boxwidth, initial_angle=180+angled, final_angle=360+180-angled, layer=self.layer_box)
        flat = cad.core.Path([(-wafer_r*np.cos(angle),wafer_r*np.sin(angle)),(-wafer_r*np.cos(angle),-wafer_r*np.sin(angle))], width=self.boxwidth, layer=self.layer_box)

        date = time.strftime("%d/%m/%Y")
        if labelloc==(0,0):
                    labelloc=(-2e3,wafer_r-1e3)
        # The label is added 100 um on top of the main cell
        label_grid_chip = cad.shapes.LineLabel( self.name + "  " +\
                                         date,500,position=labelloc,
                                         line_width=labelwidth,
                                         layer=self.layer_label)


        if frame==True:
            self.add(circ)
            self.add(flat)
        if label==True:
            self.add(label_grid_chip)
Project: Stein-Variational-Gradient-Descent    Author: DartML    | project source | file source
def evaluation(self, X_test, y_test):
        # normalization
        X_test = self.normalization(X_test)

        # average over the output
        pred_y_test = np.zeros([self.M, len(y_test)])
        prob = np.zeros([self.M, len(y_test)])

        '''
            Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood
        '''
        for i in range(self.M):
            w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
            pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
            prob[i, :] = np.sqrt(np.exp(loggamma)) /np.sqrt(2*np.pi) * np.exp( -1 * (np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma) )
        pred = np.mean(pred_y_test, axis=0)

        # evaluation
        svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
        svgd_ll = np.mean(np.log(np.mean(prob, axis = 0)))

        return (svgd_rmse, svgd_ll)
Project: pynufft    Author: jyhmiinlin    | project source | file source
def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    calculate image space scaling factor
    '''
#     import types
#     if alpha is types.ComplexType:
    alpha = numpy.real(alpha)
#         print('complex alpha may not work, but I just let it as')

    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
    return sn
Project: pynufft    Author: jyhmiinlin    | project source | file source
def nufft_r(om, N, J, K, alpha, beta):
    '''
    equation (30) of Fessler's paper

    '''

    M = numpy.size(om)  # 1D size
    gam = 2.0 * numpy.pi / (K * 1.0)
    nufft_offset0 = nufft_offset(om, J, K)  # om/gam -  nufft_offset , [M,1]
    dk = 1.0 * om / gam - nufft_offset0  # om/gam -  nufft_offset , [M,1]
    arg = outer_sum(-numpy.arange(1, J + 1) * 1.0, dk)
    L = numpy.size(alpha) - 1
#     print('alpha',alpha)
    rr = numpy.zeros((J, M), dtype=numpy.float32)
    rr = iterate_l1(L, alpha, arg, beta, K, N, rr)
    return (rr, arg)
Project: pynufft    Author: jyhmiinlin    | project source | file source
def kaiser_bessel_ft(u, J, alpha, kb_m, d):
    '''
    Interpolation weight for given J/alpha/kb-m
    '''

    u = u * (1.0 + 0.0j)
    import scipy.special
    z = numpy.sqrt((2 * numpy.pi * (J / 2) * u) ** 2.0 - alpha ** 2.0)
    nu = d / 2 + kb_m
    y = ((2 * numpy.pi) ** (d / 2)) * ((J / 2) ** d) * (alpha ** kb_m) / \
        scipy.special.iv(kb_m, alpha) * scipy.special.jv(nu, z) / (z ** nu)
    y = numpy.real(y)
    return y
Project: pynufft    Author: jyhmiinlin    | project source | file source
def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    Calculate image space scaling factor
    '''
#     import types
#     if alpha is types.ComplexType:
    alpha = numpy.real(alpha)
#         print('complex alpha may not work, but I just let it as')

    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1)))
    return sn
Project: SWEETer-Cat    Author: DanielAndreasen    | project source | file source
def planetary_radius(mass, radius):
    """Calculate planetary radius if not given assuming a density dependent on
    mass"""
    if not isinstance(mass, (int, float)):
        if isinstance(radius, (int, float)):
            return radius
        else:
            return '...'
    if mass < 0:
        raise ValueError('Only positive planetary masses allowed.')

    Mj = c.M_jup
    Rj = c.R_jup
    if radius == '...' and isinstance(mass, (int, float)):
        if mass < 0.01:  # Earth density
            rho = 5.51
        elif 0.01 <= mass <= 0.5:
            rho = 1.64  # Neptune density
        else:
            rho = Mj/(4./3*np.pi*Rj**3)  # Jupiter density
        R = ((mass*Mj)/(4./3*np.pi*rho))**(1./3)  # radius implied by mass and density
        R /= Rj
    else:
        return radius
    return R.value
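The mass > 0.5 branch recovers Jupiter's bulk density, so a Jupiter-mass planet with unknown radius comes out at roughly one Jupiter radius. A unit-free check of that identity (approximate constants, astropy not required):

import numpy as np

M_jup = 1.898e27   # kg, approximate
R_jup = 7.1492e7   # m, approximate

rho = M_jup / (4.0 / 3.0 * np.pi * R_jup**3)             # Jupiter bulk density
R = (M_jup / (4.0 / 3.0 * np.pi * rho)) ** (1.0 / 3.0)   # invert the density relation
print(R / R_jup)                                         # 1.0 by construction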
Project: mdct    Author: nils-werner    | project source | file source
def test_kbd():
    M = 100
    w = mdct.windows.kaiser_derived(M, beta=4.)

    assert numpy.allclose(w[:M//2] ** 2 + w[-M//2:] ** 2, 1.)

    with pytest.raises(ValueError):
        mdct.windows.kaiser_derived(M + 1, beta=4.)

    assert numpy.allclose(
        mdct.windows.kaiser_derived(2, beta=numpy.pi/2)[:1],
        [numpy.sqrt(2)/2])

    assert numpy.allclose(
        mdct.windows.kaiser_derived(4, beta=numpy.pi/2)[:2],
        [0.518562710536, 0.855039598640])

    assert numpy.allclose(
        mdct.windows.kaiser_derived(6, beta=numpy.pi/2)[:3],
        [0.436168993154, 0.707106781187, 0.899864772847])
Project: deep-prior    Author: moberweger    | project source | file source
def gaussian_kernel(kernel_shape, sigma=None):
    """
    Get 2D Gaussian kernel
    :param kernel_shape: kernel size
    :param sigma: sigma of Gaussian distribution
    :return: 2D Gaussian kernel
    """
    kern = numpy.zeros((kernel_shape, kernel_shape), dtype='float32')

    # get sigma from kernel size
    if sigma is None:
        sigma = 0.3*((kernel_shape-1.)*0.5 - 1.) + 0.8

    def gauss(x, y, s):
        Z = 2. * numpy.pi * s ** 2.
        return 1. / Z * numpy.exp(-(x ** 2. + y ** 2.) / (2. * s ** 2.))

    mid = numpy.floor(kernel_shape / 2.)
    for i in xrange(0, kernel_shape):
        for j in xrange(0, kernel_shape):
            kern[i, j] = gauss(i - mid, j - mid, sigma)

    return kern / kern.sum()
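A vectorised sketch of the same kernel (meshgrid instead of the double loop; assumed equivalent), confirming that the final division makes the kernel sum to 1:

import numpy as np

shape, sigma = 5, 1.0
ii, jj = np.meshgrid(np.arange(shape), np.arange(shape), indexing='ij')
mid = np.floor(shape / 2.0)
Z = 2.0 * np.pi * sigma**2                                   # Gaussian normalisation constant
kern = np.exp(-((ii - mid)**2 + (jj - mid)**2) / (2.0 * sigma**2)) / Z
kern /= kern.sum()
print(kern.sum(), kern[2, 2])                                # ~1.0 and the central (peak) value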
Project: pyku    Author: dubvulture    | project source | file source
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size / 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: pyku    Author: dubvulture    | project source | file source
def save_hough(self, lines, clmap):
        """
        :param lines: (rho, theta) pairs
        :param clmap: clusters assigned to lines
        :return: None
        """
        height, width = self.image.shape
        ratio = 600. * (self.step+1) / min(height, width)
        temp = cv2.resize(self.image, None, fx=ratio, fy=ratio,
                          interpolation=cv2.INTER_CUBIC)
        temp = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
        colors = [(0, 127, 255), (255, 0, 127)]

        for i in range(0, np.size(lines) / 2):
            rho = lines[i, 0]
            theta = lines[i, 1]
            color = colors[clmap[i, 0]]
            if theta < np.pi / 4 or theta > 3 * np.pi / 4:
                pt1 = (rho / np.cos(theta), 0)
                pt2 = (rho - height * np.sin(theta) / np.cos(theta), height)
            else:
                pt1 = (0, rho / np.sin(theta))
                pt2 = (width, (rho - width * np.cos(theta)) / np.sin(theta))
            pt1 = (int(pt1[0]), int(pt1[1]))
            pt2 = (int(pt2[0]), int(pt2[1]))
            cv2.line(temp, pt1, pt2, color, 5)

        self.save2image(temp)
Project: pyku    Author: dubvulture    | project source | file source
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size/2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: TDOSE    Author: kasperschmidt    | project source | file source
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
    """
    Build a covariance matrix for a 2D multivariate Gaussian

    --- INPUT ---
    sigmax          Standard deviation of the x-component of the multivariate Gaussian
    sigmay          Standard deviation of the y-component of the multivariate Gaussian
    angle           Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
    verbose         Toggle verbosity
    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    covmatrix = tu.build_2D_cov_matrix(3,1,35)

    """
    if verbose: print ' - Build 2D covariance matrix with variances (x,y)=('+str(sigmax)+','+str(sigmay)+\
                      ') and then rotated '+str(angle)+' degrees'
    cov_orig      = np.zeros([2,2])
    cov_orig[0,0] = sigmay**2.0
    cov_orig[1,1] = sigmax**2.0

    angle_rad     = (180.0-angle) * np.pi/180.0 # The (180-angle) makes sure the same convention as DS9 is used
    c, s          = np.cos(angle_rad), np.sin(angle_rad)
    rotmatrix     = np.matrix([[c, -s], [s, c]])

    cov_rot       = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix))  # performing rot * cov * rot^T

    return cov_rot
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
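A quick sanity check of the rotation convention (illustrative values, plain arrays instead of np.matrix): angle = 0 should leave the diagonal as (sigmay**2, sigmax**2), and angle = 90 should swap the two variances.

import numpy as np

def build_2D_cov_matrix_demo(sigmax, sigmay, angle):
    # same construction as above, without the verbose print
    cov_orig = np.diag([sigmay**2.0, sigmax**2.0])
    angle_rad = (180.0 - angle) * np.pi / 180.0
    c, s = np.cos(angle_rad), np.sin(angle_rad)
    rotmatrix = np.array([[c, -s], [s, c]])
    return rotmatrix @ cov_orig @ rotmatrix.T

print(build_2D_cov_matrix_demo(3, 1, 0))    # ~diag(1, 9), cross terms at float round-off
print(build_2D_cov_matrix_demo(3, 1, 90))   # ~diag(9, 1), axes swapped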
Project: TDOSE    Author: kasperschmidt    | project source | file source
def normalize_2D_cov_matrix(covmatrix,verbose=True):
    """
    Calculate the normalization factor for a multivariate Gaussian from its covariance matrix.
    Note, however, that the Gaussian returned by tu.gen_2Dgauss() is normalized for scale=1

    --- INPUT ---
    covmatrix       covariance matrix to normalize
    verbose         Toggle verbosity

    """
    detcov  = np.linalg.det(covmatrix)
    normfac = 1.0 / (2.0 * np.pi * np.sqrt(detcov) )

    return normfac
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
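For the identity covariance this reduces to 1/(2*pi), the peak value of a standard bivariate Gaussian; a one-line check (illustrative, not project code):

import numpy as np

cov = np.eye(2)                                            # unit, uncorrelated covariance
normfac = 1.0 / (2.0 * np.pi * np.sqrt(np.linalg.det(cov)))
print(normfac, 1.0 / (2.0 * np.pi))                        # both ~0.1592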
Project: pyballd    Author: Yurlungur    | project source | file source
def f(r,theta):
    out = np.sin(theta)*np.cos(K*2*np.pi*(1./r))/r
    out[-1] = 0
    return out
Project: pyballd    Author: Yurlungur    | project source | file source
def dfdr(r,theta):
    out = (2*K*np.pi*np.sin(2*np.pi*K/r)
           -r*np.cos(2*np.pi*K/r))*np.sin(theta)/(r**3)
    out[-1] = 0
    return out
Project: pyballd    Author: Yurlungur    | project source | file source
def dfdrdtheta(r,theta):
    out = (2*K*np.pi*np.sin(2*np.pi*K/r)
           -r*np.cos(2*np.pi*K/r))*np.cos(theta)/(r**3)
    out[-1] = 0
    return out
Project: pyballd    Author: Yurlungur    | project source | file source
def __init__(self,
                 order_X,r_h,
                 order_theta,
                 theta_min = 0,
                 theta_max = np.pi,
                 L=1):
        """Constructor.

        Parameters
        ----------
        order_X     -- polynomial order in X direction
        r_h         -- physical minimum radius (uncompactified coordinates)
        order_theta -- polynomial order in theta direction
        theta_min   -- minimum longitudinal value. Should be no less than 0.
        theta_max   -- maximum longitudinal value. Should be no greater than pi.
        L           -- Characteristic length scale of problem.
                       Needed for compactification
        """
        self.order_X = order_X
        self.order_theta = order_theta
        self.r_h = r_h
        self.theta_min = theta_min
        self.theta_max = theta_max
        self.L = L
        super(PyballdDiscretization,self).__init__(order_X,
                                            self.X_min,self.X_max,
                                            order_theta,
                                            theta_min,theta_max)
        self.r = self.get_r_from_X(self.x)
        self.R = self.get_r_from_X(self.X)
        self.dRdX = self.get_drdX(self.X)
        self.drdX = self.get_drdX(self.x)
        self.dXdR = self.get_dXdr(self.X)
        self.dXdr = self.get_dXdr(self.x)
        self.d2XdR2 = self.get_d2Xdr2(self.X)
        self.d2Xdr2 = self.get_d2Xdr2(self.x)
        self.d2RdX2 = self.get_d2rdX2(self.X)
        self.d2rdX2 = self.get_d2rdX2(self.x)
        self.theta = self.y
        self.THETA = self.Y
Project: pyballd    Author: Yurlungur    | project source | file source
def get_integration_weights(order,nodes=None):
    """
    Returns the integration weights for Gauss-Lobatto quadrature
    as a function of the order of the polynomial we want to
    represent.
    See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    See: arXiv:gr-qc/0609020v1
    """
    if np.all(nodes == False):
        nodes=get_quadrature_points(order)
    if poly == polynomial.chebyshev.Chebyshev:
        weights = np.empty((order+1))
        weights[1:-1] = np.pi/order
        weights[0] = np.pi/(2*order)
        weights[-1] = weights[0]
        return weights
    elif poly == polynomial.legendre.Legendre:
        interior_weights = 2/((order+1)*order*poly.basis(order)(nodes[1:-1])**2)
        boundary_weights = np.array([1-0.5*np.sum(interior_weights)])
        weights = np.concatenate((boundary_weights,
                                  interior_weights,
                                  boundary_weights))
        return weights
    else:
        raise ValueError("Not a known polynomial type.")
        return False
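In the Chebyshev branch the weights are pi/order at the interior nodes and pi/(2*order) at the two endpoints; they sum to pi, the integral of the Chebyshev weight 1/sqrt(1-x^2) over [-1, 1]. A minimal standalone check of that branch:

import numpy as np

order = 8
weights = np.full(order + 1, np.pi / order)    # interior Gauss-Lobatto-Chebyshev weights
weights[0] = weights[-1] = np.pi / (2 * order) # endpoint weights
print(weights.sum(), np.pi)                    # both pi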
Project: GELUs    Author: hendrycks    | project source | file source
def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
Project: GELUs    Author: hendrycks    | project source | file source
def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))