Python numpy module: tan() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.tan().
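Before the project excerpts, here is a minimal self-contained sketch of numpy.tan itself (not taken from any of the projects below): it is a ufunc that expects angles in radians and broadcasts over arrays.

import numpy as np

angles_deg = np.array([0.0, 30.0, 45.0, 60.0])
print(np.tan(np.radians(angles_deg)))   # [0.     0.5774 1.     1.7321]

# Near odd multiples of pi/2 the tangent diverges; numpy returns a very large
# finite value rather than infinity because pi/2 is not exactly representable.
print(np.tan(np.pi / 2))                # ~1.633e+16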

Project: demcoreg    Author: dshean
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    bin_range = (0, 360)
    a = (np.arange(*bin_range))
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size':8})
    return f 
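genplot relies on a nuth_func model that is not shown in this excerpt; the legend string above indicates the Nuth and Kaab (2011) cosine fit y = a*cos(b - x) + c with aspect x in degrees. A hedged sketch of what such a helper could look like (the degree-to-radian handling is an assumption, not copied from the project):

import numpy as np

def nuth_func(x, a, b, c):
    # Aspect x and phase b are assumed to be in degrees.
    return a * np.cos(np.deg2rad(b - x)) + c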

#Function copied from openPIV pyprocess
Project: ModernGL-Volume-Raycasting-Example    Author: ulricheck
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.camera_center += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.camera_center - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.camera_fov * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.camera_center = self.camera_center + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
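The relative-pan scaling above follows from pinhole geometry: the width of the scene visible at the distance d of the center point is

$$x_{\text{dist}} = 2\,d\,\tan\!\left(\tfrac{\text{fov}}{2}\right), \qquad x_{\text{scale}} = \frac{x_{\text{dist}}}{\text{width}_{\text{px}}},$$

so a drag of dx = 1 shifts the look-at point by roughly the world-space size of one screen pixel, as the docstring promises.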
Project: NeoAnalysis    Author: neoanalysis
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
Project: NeoAnalysis    Author: neoanalysis
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.opts['center'] - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
Project: NeoAnalysis    Author: neoanalysis
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
    """
    Construct a path outlining an arrow with the given dimensions.
    The arrow points in the -x direction with tip positioned at 0,0.
    If *tipAngle* is supplied (in degrees), it overrides *headWidth*.
    If *tailLen* is None, no tail will be drawn.
    """
    headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
    path = QtGui.QPainterPath()
    path.moveTo(0,0)
    path.lineTo(headLen, -headWidth)
    if tailLen is None:
        innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, 0)
    else:
        tailWidth *= 0.5
        innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, -tailWidth)
        path.lineTo(headLen + tailLen, -tailWidth)
        path.lineTo(headLen + tailLen, tailWidth)
        path.lineTo(innerY, tailWidth)
    path.lineTo(headLen, headWidth)
    path.lineTo(0,0)
    return path
Project: NeoAnalysis    Author: neoanalysis
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
Project: NeoAnalysis    Author: neoanalysis
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
    """
    Construct a path outlining an arrow with the given dimensions.
    The arrow points in the -x direction with tip positioned at 0,0.
    If *tipAngle* is supplied (in degrees), it overrides *headWidth*.
    If *tailLen* is None, no tail will be drawn.
    """
    headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
    path = QtGui.QPainterPath()
    path.moveTo(0,0)
    path.lineTo(headLen, -headWidth)
    if tailLen is None:
        innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, 0)
    else:
        tailWidth *= 0.5
        innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, -tailWidth)
        path.lineTo(headLen + tailLen, -tailWidth)
        path.lineTo(headLen + tailLen, tailWidth)
        path.lineTo(innerY, tailWidth)
    path.lineTo(headLen, headWidth)
    path.lineTo(0,0)
    return path
Project: planetplanet    Author: rodluger
def ecc(self, val):
        '''
        We need to update the time of pericenter passage whenever the
        eccentricity, longitude of pericenter, period, or time of transit
        changes. See the appendix in Shields et al. (2016).

        '''

        self._ecc = val
        fi = (3 * np.pi / 2.) - self._w
        self.tperi0 = self.t0 + (self.per * np.sqrt(1. - self.ecc * self.ecc) /
                     (2. * np.pi) * (self.ecc * np.sin(fi) /
                     (1. + self.ecc * np.cos(fi)) - 2.
                     / np.sqrt(1. - self.ecc * self.ecc)
                     * np.arctan2(np.sqrt(1. - self.ecc * self.ecc)
                     * np.tan(fi/2.), 1. + self.ecc)))
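Restating the assignment above as an equation (a direct transcription of the code, with $f_i = 3\pi/2 - \omega$ the true anomaly at the time of transit $t_0$, $P$ the period and $e$ the eccentricity; note that $\arctan2(y, 1+e)$ reduces to $\arctan(y/(1+e))$ since $1+e > 0$):

$$t_{\mathrm{peri},0} = t_0 + \frac{P\sqrt{1-e^2}}{2\pi}\left[\frac{e\sin f_i}{1+e\cos f_i} - \frac{2}{\sqrt{1-e^2}}\arctan\!\left(\frac{\sqrt{1-e^2}\,\tan(f_i/2)}{1+e}\right)\right]$$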
Project: modesolverpy    Author: jtambasco
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
                              x_top_right, x_bot_left, n_material):
        angle = np.radians(angle)
        trap_len = (y_top_right - y_bot_left) / np.tan(angle)
        num_x_iterations = round(trap_len/self.x_step)
        y_per_iteration = round(self.y_pts / num_x_iterations)

        lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
        rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)

        for i, _ in enumerate(xy_mask):
            xy_mask[i][:lhs_x_start_index] = False
            xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True

            if i % y_per_iteration == 0:
                lhs_x_start_index -= 1
                rhs_x_stop_index += 1

        self.n[xy_mask] = n_material
        return self.n
Project: accpy    Author: kramerfelix
def edge(ev):
    rho = ev[2]     # bending radius
    phi = ev[3]     # edge angle
    # distance between pole shoes g * pole-shoe form factor K
    gK = ev[5]      # K is ~0.45 for rectangular and ~0.7 for Rogowski
    R = eye(6)
    tanphi = tan(phi)
    R[1, 0] = tanphi/rho
    if gK != 0:
        cosphi = cos(phi)
        sinphi = sin(phi)
        # Hinterberger 4.79 (exact)
        R[3, 2] = -(tanphi-gK/rho*(1+(sinphi)**2)/(cosphi**3))/rho
        # Madx and Chao:
        # R[3,2] = -(tan(phi-gK/rho*(1+sinphi**2)/cosphi))/rho
    else:
        R[3, 2] = -tanphi/rho
    return R
Project: nmmn    Author: rsnemmen
def arcsec2pc(d=15.,a=1.):
    """
Given the input angular size and distance to the object, computes
the corresponding linear size in pc. 

:param d: distance in Mpc
:param a: angular size in arcsec
:returns: linear size in pc
    """

    # convert arcsec to radians
    a=a*4.848e-6    
    # convert distance to pc instead of Mpc
    d=d*1e6

    return d*numpy.tan(a)
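A quick sanity check of the conversion, assuming the function above is in scope (hypothetical usage, values rounded): for small angles tan(a) is approximately a, so 1 arcsec at 15 Mpc subtends about 15e6 pc * 4.848e-6 = 72.7 pc.

print(arcsec2pc())            # ~72.7  (1 arcsec at the default 15 Mpc)
print(arcsec2pc(d=1., a=1.))  # ~4.85  (1 arcsec at 1 Mpc)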
Project: pyberny    Author: azag0
def eval(self, coords, grad=False):
        v1 = (coords[self.i]-coords[self.j])/bohr
        v2 = (coords[self.k]-coords[self.j])/bohr
        dot_product = np.dot(v1, v2)/(norm(v1)*norm(v2))
        if dot_product < -1:
            dot_product = -1
        elif dot_product > 1:
            dot_product = 1
        phi = np.arccos(dot_product)
        if not grad:
            return phi
        if abs(phi) > pi-1e-6:
            grad = [
                (pi-phi)/(2*norm(v1)**2)*v1,
                (1/norm(v1)-1/norm(v2))*(pi-phi)/(2*norm(v1))*v1,
                (pi-phi)/(2*norm(v2)**2)*v2
            ]
        else:
            grad = [
                1/np.tan(phi)*v1/norm(v1)**2-v2/(norm(v1)*norm(v2)*np.sin(phi)),
                (v1+v2)/(norm(v1)*norm(v2)*np.sin(phi)) -
                1/np.tan(phi)*(v1/norm(v1)**2+v2/norm(v2)**2),
                1/np.tan(phi)*v2/norm(v2)**2-v1/(norm(v1)*norm(v2)*np.sin(phi))
            ]
        return phi, grad
Project: imgProcessor    Author: radjkarl
def genericCameraMatrix(shape, angularField=60):
    '''
    Return a generic camera matrix
    [[fx, 0, cx],
    [ 0, fy, cy],
    [ 0, 0,   1]]
    for a given image shape
    '''
    # http://nghiaho.com/?page_id=576
    # assume that the optical centre is in the middle:
    cy = int(shape[0] / 2)
    cx = int(shape[1] / 2)

    # assume that the FOV is 60 DEG (webcam)
    fx = fy = cx / np.tan(angularField / 2 * np.pi /
                          180)  # camera focal length
    # see
    # http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
    return np.array([[fx, 0, cx],
                     [0, fy, cy],
                     [0, 0, 1]
                     ], dtype=np.float32)
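A worked check of the formula above (hypothetical usage, not part of the original project): for a 480x640 image with the default 60-degree field of view, cx = 320, cy = 240 and fx = fy = 320 / tan(30 deg), about 554.3.

K = genericCameraMatrix((480, 640))   # shape is (rows, cols)
# K ~= [[554.26,   0.  , 320.],
#       [  0.  , 554.26, 240.],
#       [  0.  ,   0.  ,   1.]]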
Project: visual_mpc    Author: febert
def mujoco_to_imagespace(self, mujoco_coord, numpix=64, truncate=False):
        """
        convert from Mujoco coordinates to numpix x numpix image space:
        :param numpix: number of pixels of square image
        :param mujoco_coord:
        :return: pixel_coord
        """
        viewer_distance = .75  # distance from camera to the viewing plane
        window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
        pixelheight = window_height / numpix  # height of one pixel
        pixelwidth = pixelheight
        window_width = pixelwidth * numpix
        middle_pixel = numpix / 2
        pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) /
                              pixelwidth + np.array([middle_pixel, middle_pixel]))
        pixel_coord = pixel_coord.astype(int)

        return pixel_coord
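Plugging the constants above into the window geometry (a quick numeric check):

$$\text{window\_height} = 2\tan\!\left(\tfrac{75^\circ}{2}\right)\times 0.75 \approx 1.151 \text{ Mujoco units}, \qquad \text{pixelheight} \approx \frac{1.151}{64} \approx 0.018 \text{ units per pixel.}$$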
Project: Jensen3D    Author: byuflowlab
def __init__(self, nTurbs):
        super(effectiveVelocity, self).__init__()

        self.fd_options['form'] = 'central'
        self.fd_options['step_size'] = 1.0e-6
        self.fd_options['step_type'] = 'relative'

        self.nTurbs = nTurbs
        self.add_param('xr', val=np.zeros(nTurbs))
        self.add_param('r', val=np.zeros(nTurbs))
        self.add_param('alpha', val=np.tan(0.1))
        self.add_param('windSpeed', val=0.0)
        self.add_param('a', val=1./3.)
        self.add_param('overlap', val=np.empty([nTurbs, nTurbs]))

        self.add_output('hubVelocity', val=np.zeros(nTurbs))
Project: Jensen3D    Author: byuflowlab
def conferenceWakeOverlap(X, Y, R):

    n = np.size(X)

    # theta = np.zeros((n, n), dtype=np.float)        # angle of wake from fulcrum
    f_theta = np.zeros((n, n), dtype=float)         # cosine smoothing factors for the wake overlap

    for i in range(0, n):
        for j in range(0, n):
            if X[i] < X[j]:
                z = R/np.tan(0.34906585)
                # print z
                theta = np.arctan((Y[j] - Y[i]) / (X[j] - X[i] + z))
                # print 'theta =', theta
                if -0.34906585 < theta < 0.34906585:
                    f_theta[i][j] = (1 + np.cos(9*theta))/2
                    # print f_theta

    # print z
    # print f_theta
    return f_theta
Project: Jensen3D    Author: byuflowlab
def conferenceWakeOverlap_tune(X, Y, R, boundAngle):

    n = np.size(X)
    boundAngle = boundAngle*np.pi/180.0
    # theta = np.zeros((n, n), dtype=np.float)      # angle of wake from fulcrum
    f_theta = np.zeros((n, n), dtype=float)         # cosine smoothing factors for the wake overlap
    q = np.pi/boundAngle                            # factor inside the cos term of the smooth Jensen (see Jensen1983 eq.(3))
    # print 'boundAngle = %s' %boundAngle, 'q = %s' %q
    for i in range(0, n):
        for j in range(0, n):
            if X[i] < X[j]:
                # z = R/tan(0.34906585)
                z = R/np.tan(boundAngle)               # distance from fulcrum to wake producing turbine
                # print z
                theta = np.arctan((Y[j] - Y[i]) / (X[j] - X[i] + z))
                # print 'theta =', theta

                if -boundAngle < theta < boundAngle:

                    f_theta[i][j] = (1. + np.cos(q*theta))/2.
                    # print f_theta

    # print z
    # print f_theta
    return f_theta
Project: Jensen3D    Author: byuflowlab
def get_cosine_factor_original(X, Y, R0, bound_angle=20.0):

    n = np.size(X)
    bound_angle = bound_angle*np.pi/180.0
    # theta = np.zeros((n, n), dtype=np.float)      # angle of wake from fulcrum
    f_theta = np.zeros((n, n), dtype=float)         # cosine smoothing factors for the wake overlap
    q = np.pi/bound_angle                           # factor inside the cos term of the smooth Jensen (see Jensen1983 eq.(3))

    for i in range(0, n):
        for j in range(0, n):
            if X[i] < X[j]:
                z = R0/np.tan(bound_angle)               # distance from fulcrum to wake producing turbine

                theta = np.arctan((Y[j] - Y[i]) / (X[j] - X[i] + z))

                if -bound_angle < theta < bound_angle:

                    f_theta[i][j] = (1. + np.cos(q*theta))/2.

    return f_theta
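The three Jensen3D variants above implement the same smoothed wake-overlap factor. Written out, for an upstream turbine $i$ and a downstream turbine $j$ with bound angle $\theta_b$ (hard-coded to $0.34906585\ \mathrm{rad} = 20^\circ$, i.e. $q = 9$, in the first variant):

$$z = \frac{R_0}{\tan\theta_b}, \qquad \theta = \arctan\frac{Y_j - Y_i}{X_j - X_i + z}, \qquad f_\theta = \frac{1+\cos(q\theta)}{2}\ \text{ for } |\theta| < \theta_b, \qquad q = \frac{\pi}{\theta_b}.$$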
Project: lsdc    Author: febert
def mujoco_to_imagespace(mujoco_coord, numpix=64):
    """
    convert from Mujoco coordinates to numpix x numpix image space:
    :param numpix: number of pixels of square image
    :param mujoco_coord:
    :return: pixel_coord
    """
    viewer_distance = .75  # distance from camera to the viewing plane
    window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
    pixelheight = window_height / numpix  # height of one pixel
    pixelwidth = pixelheight
    window_width = pixelwidth * numpix
    middle_pixel = numpix / 2
    pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) /
                          pixelwidth + np.array([middle_pixel, middle_pixel]))
    pixel_coord = pixel_coord.astype(int)
    return pixel_coord
Project: PyFrac    Author: GeoEnergyLab-EPFL
def TipAsym_UniversalW_delt_Res(w, *args):
    """The residual function zero of which will give the General asymptote """

    (dist, Kprime, Eprime, muPrime, Cbar, Vel) = args

    Kh = Kprime * dist ** 0.5 / (Eprime * w)
    Ch = 2 * Cbar * dist ** 0.5 / (Vel ** 0.5 * w)
    sh = muPrime * Vel * dist ** 2 / (Eprime * w ** 3)

    g0 = f(Kh, 0.9911799823 * Ch, 10.392304845)
    delt = 10.392304845 * (1 + 0.9911799823 * Ch) * g0

    C1 = 4 * (1 - 2 * delt) / (delt * (1 - delt)) * np.tan(np.pi * delt)
    C2 = 16 * (1 - 3 * delt) / (3 * delt * (2 - 3 * delt)) * np.tan(3 * np.pi * delt / 2)
    b = C2 / C1

    return sh - f(Kh, Ch * b, C1)
Project: PyFrac    Author: GeoEnergyLab-EPFL
def TipAsym_Universal_delt_Res(dist, *args):
    """More precise function to be minimized to find root for universal Tip asymptote (see Donstov and Pierce)"""

    (wEltRibbon, Kprime, Eprime, muPrime, Cbar, DistLstTSEltRibbon, dt) = args

    Vel = (dist - DistLstTSEltRibbon) / dt
    Kh = Kprime * dist ** 0.5 / (Eprime * wEltRibbon)
    Ch = 2 * Cbar * dist ** 0.5 / (Vel ** 0.5 * wEltRibbon)
    sh = muPrime * Vel * dist ** 2 / (Eprime * wEltRibbon ** 3)

    g0 = f(Kh, 0.9911799823 * Ch, 10.392304845)
    delt = 10.392304845 * (1 + 0.9911799823 * Ch) * g0

    C1 = 4 * (1 - 2 * delt) / (delt * (1 - delt)) * np.tan(math.pi * delt)
    C2 = 16 * (1 - 3 * delt) / (3 * delt * (2 - 3 * delt)) * np.tan(3 * math.pi * delt / 2)
    b = C2 / C1

    return sh - f(Kh, Ch * b, C1)
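Both PyFrac residuals above use the same delta-dependent coefficients; in equation form:

$$C_1(\delta) = \frac{4(1-2\delta)}{\delta(1-\delta)}\tan(\pi\delta), \qquad C_2(\delta) = \frac{16(1-3\delta)}{3\delta(2-3\delta)}\tan\!\left(\frac{3\pi\delta}{2}\right), \qquad b = \frac{C_2}{C_1},$$

and the value returned is $s_h - f(K_h, C_h\,b, C_1)$, where $f$ is the universal-asymptote helper defined elsewhere in the project.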


# ----------------------------------------------------------------------------------------------------------------------
Project: vsi_common    Author: VisionSystemsInc
def construct_K(image_size, focal_len=None, fov_degrees=None, fov_radians=None):
  """ create calibration matrix K using the image size and focal length or field of view angle
  Assumes 0 skew and principal point at center of image
  Note that image_size = (width, height)
  Note that fov is assumed to be measured horizontally
  """
  if not np.sum([focal_len is not None, fov_degrees is not None, fov_radians is not None]) == 1:
    raise Exception('Specify exactly one of [focal_len, fov_degrees, fov_radians]')

  if fov_degrees is not None:
    fov_radians = np.deg2rad(fov_degrees)
  if fov_radians is not None:
    focal_len = image_size[0] / (2.0 * np.tan(fov_radians/2.0))

  K = np.array([[focal_len, 0, image_size[0]/2.0], [0, focal_len, image_size[1]/2.0], [0, 0, 1]])
  return K
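A hedged usage sketch, assuming numpy is imported as np and the function above is in scope (hypothetical values): a 640x480 image with a 60-degree horizontal field of view gives focal_len = 640 / (2 * tan(30 deg)), about 554.3.

K = construct_K((640, 480), fov_degrees=60)
# K ~= [[554.26,   0.  , 320.],
#       [  0.  , 554.26, 240.],
#       [  0.  ,   0.  ,   1.]]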
Project: OpenGoddard    Author: istellartech
def inequality(prob, obj):
    x = prob.states_all_section(0)
    y = prob.states_all_section(1)
    v = prob.states_all_section(2)
    theta = prob.controls_all_section(0)
    tf = prob.time_final(-1)

    result = Condition()

    # lower bounds
    result.lower_bound(tf, 0.1)
    result.lower_bound(y, 0)
    result.lower_bound(theta, 0)

    # upper bounds
    # result.upper_bound(theta, np.pi/2)
    # result.upper_bound(y, x * np.tan(obj.theta0) + obj.h)

    return result()
Project: pyins    Author: nmayorov
def _dtheta_from_omega_matrix(theta):
    norm = np.linalg.norm(theta, axis=1)
    k = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
    mask = ~mask
    nm = norm[mask]
    k[mask] = 1/12 + 1/720 * nm**2

    A = np.empty((norm.shape[0], 3, 3))
    skew = _skew_matrix_array(theta)
    A[:] = np.identity(3)
    A[:] += 0.5 * skew
    A[:] += k[:, None, None] * util.mm_prod(skew, skew)

    return A
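In equation form, with $\theta = \lVert\boldsymbol{\theta}\rVert$ and $S = [\boldsymbol{\theta}]_\times$ the skew-symmetric matrix built above:

$$A = I + \tfrac{1}{2}S + k(\theta)\,S^2, \qquad k(\theta) = \frac{1 - \tfrac{\theta}{2}\cot\tfrac{\theta}{2}}{\theta^2} \approx \frac{1}{12} + \frac{\theta^2}{720} \quad (\theta \ll 1),$$

which is exactly the small-angle branch the code switches to when the norm drops below $10^{-4}$.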
Project: und_Sophie_2016    Author: SophieTh
def describe_ring(self,distance,harmonic_number,ring_number,t=None):
        if ring_number==0 :
            X=np.array([0.0])
            Y=X
        else :
            if t is None :
                t=np.linspace(0.0,0.5*np.pi,101)
            n=harmonic_number
            theta=self.angle_ring_number(harmonic_number=n, ring_number=ring_number)
            if distance==None :
                R=theta
            else :
                R=distance*np.tan(theta)
            X=R*np.cos(t)
            Y=R*np.sin(t)
        return X,Y

    # in photon /sec /1% /mrad*mrad
Project: car-detection    Author: mmetcalfe
def openGLPerspectiveMatrix(fovy, aspect, near, far):
    # print 'fovy:', fovy
    # print 'aspect:', aspect
    # print 'near:', near
    # print 'far:', far

    f = 1.0 / np.tan(fovy/2.0)

    # print 'f:', f

    return np.matrix([
    [f/aspect, 0, 0, 0],
    [0, f, 0, 0],
    [0, 0, (far+near)/(near-far), (2.0*near*far)/(near-far)],
    [0, 0, -1, 0]
    ], np.float32)
Project: theia    Author: bandang0
def basis(a):
    '''Returns two vectors u and v such that (a, u, v) is a direct ON basis.

    '''
    ez = np.array([0., 0., 1.], dtype = np.float64)

    if np.abs(np.dot(a, ez)) == 1.:
        u = np.dot(a, ez) * np.array([1., 0., 0.], dtype = np.float64)
        v = np.array([0., 1., 0.], dtype = np.float64)
        return u,v
    else:
        theta = np.arccos(np.dot(a, ez))

        u = ez/np.sin(theta) - a/np.tan(theta)
        v = np.cross(a, u)
        u = u/np.linalg.norm(u)
        v = v/np.linalg.norm(v)
        return u, v
Project: rmp    Author: iamprem
def nh_steer(self, q_nearest, q_rand, epsilon):
        """
        For a car like robot, where it takes two control input (u_speed, u_phi)
        All possible combinations of control inputs are generated and used to find the closest q_new to q_rand
        :param q_nearest:
        :param q_rand:
        :param epsilon:
        :return:
        """
        L = 20.0  # Length between midpoints of front and rear axle of the car like robot
        u_speed, u_phi = [-1.0, 1.0], [-math.pi/4, 0, math.pi/4]
        controls = list(itertools.product(u_speed, u_phi))
        # euler = lambda t_i, q, u_s, u_p, L: (u_s*math.cos(q[2]), u_s*math.sin(q[2]), u_s/L*math.tan(u_p))
        result = []
        ctrls_path = {c: [] for c in controls}
        for ctrl in controls:
            q_new = q_nearest
            for t_i in range(epsilon):  # h is assumed to be 1 here for euler integration
                q_new = tuple(map(add, q_new, self.euler(t_i, q_new, ctrl[0], ctrl[1], L)))
                ctrls_path[ctrl].append(q_new)
            result.append((ctrl[0], ctrl[1], q_new))
        q_news = [x[2] for x in result]
        _, _, idx = self.nearest_neighbour(q_rand, np.array(q_news))
        return result[idx], ctrls_path
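The commented-out euler lambda above spells out the kinematic bicycle model that self.euler is assumed to implement:

$$\dot{x} = u_s\cos\theta, \qquad \dot{y} = u_s\sin\theta, \qquad \dot{\theta} = \frac{u_s}{L}\tan u_\phi,$$

with $u_s$ the speed control, $u_\phi$ the steering angle and $L = 20$ the axle separation.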
Project: imagepy    Author: Image-Py
def run(self, ips, snap, img, para = None):
        if not ips.imgtype in ('8-bit', 'rgb'):
            mid = (self.arange[0] + self.arange[1])/2 - para['bright']
            length = (self.arange[1] - self.arange[0])/np.tan(para['contrast']/180.0*np.pi)
            ips.range = (mid-length/2, mid+length/2)
            return
        if para == None: para = self.para
        mid = 128-para['bright']
        length = 255/np.tan(para['contrast']/180.0*np.pi)
        print(255/np.tan(para['contrast']/180.0*np.pi)/2)
        print(mid-length/2, mid+length/2)
        img[:] = snap
        if mid-length/2>0:
            np.subtract(img, mid-length/2, out=img, casting='unsafe')
            np.multiply(img, 255.0/length, out=img, casting='unsafe')
        else:
            np.multiply(img, 255.0/length, out=img, casting='unsafe')
            np.subtract(img, (mid-length/2)/length*255, out=img, casting='unsafe')
        img[snap<mid-length/2] = 0
        img[snap>mid+length/2] = 255
Project: dlcv_for_beginners    Author: frombeijingwithlove
def rotate_image(img, angle, crop):
    h, w = img.shape[:2]
    angle %= 360
    M_rotate = cv2.getRotationMatrix2D((w/2, h/2), angle, 1)
    img_rotated = cv2.warpAffine(img, M_rotate, (w, h))

    if crop:
        angle_crop = angle % 180
        if angle_crop > 90:
            angle_crop = 180 - angle_crop
        theta = angle_crop * np.pi / 180.0
        hw_ratio = float(h) / float(w)
        tan_theta = np.tan(theta)
        numerator = np.cos(theta) + np.sin(theta) * tan_theta
        r = hw_ratio if h > w else 1 / hw_ratio
        denominator = r * tan_theta + 1
        crop_mult = numerator / denominator
        w_crop = int(round(crop_mult*w))
        h_crop = int(round(crop_mult*h))
        x0 = int((w-w_crop)/2)
        y0 = int((h-h_crop)/2)

        img_rotated = crop_image(img_rotated, x0, y0, w_crop, h_crop)

    return img_rotated
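The crop factor computed above can be stated compactly. With $\theta$ the rotation angle folded into $[0^\circ, 90^\circ]$ and $r = \max(h,w)/\min(h,w)$:

$$\text{crop\_mult} = \frac{\cos\theta + \sin\theta\tan\theta}{r\tan\theta + 1},$$

and the inner crop taken from the rotated image is crop_mult*w by crop_mult*h, centred in the frame.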
Project: OpenLaval    Author: istellartech
def make_upper_straight_line(self):
        """ make upper straight line """
        targetx = self.lower_concave_in_x_end
        x = self.upper_convex_in_x_end
        y = self.upper_convex_in_y_end
        targety = np.tan(np.deg2rad(self.beta_in)) * targetx + y - np.tan(np.deg2rad(self.beta_in)) * x
        self.upper_straight_in_x = [targetx, x]
        self.upper_straight_in_y = [targety, y]
        self.shift = - abs(self.lower_concave_in_y_end - targety)

        targetx = self.lower_concave_out_x_end
        x = self.upper_convex_out_x_end
        y = self.upper_convex_out_y_end
        targety = np.tan(np.deg2rad(self.beta_out)) * targetx + y - np.tan(np.deg2rad(self.beta_out)) * x
        self.upper_straight_out_x = [targetx, x]
        self.upper_straight_out_y = [targety, y]
Project: freecad-nurbs    Author: microelly2
def setpointRelativeZ(self,u,v,h=0,w=0,update=False):
        ''' set relative height and weight of a pole point '''

        u,v=v,u
        #self.g[v][u][2] = self.gBase[v][u][2] + h
        # unrestricted
        self.g[v][u][2] = self.gBase[v][u][2] + 100* np.tan(0.5*np.pi*h/101)
        sayW(("set  rel h, height",h,self.g[v][u][2]))

        if update:
            self.gBase=self.g.copy()

        try:
            wl=self.obj2.weights
            wl[v*self.obj2.nNodes_u+u]=w
            self.obj2.weights=wl
        except:
            sayexc()
Project: TA_example_labs    Author: mit-racecar
def publish_line(self):
        # find the two points that intersect between the fan angle lines and the found y=mx+c line
        x0 = self.c / (np.tan(FAN_ANGLE) - self.m)
        x1 = self.c / (-np.tan(FAN_ANGLE) - self.m)

        y0 = self.m*x0+self.c
        y1 = self.m*x1+self.c

        poly = Polygon()
        p0 = Point32()
        p0.y = x0
        p0.x = y0

        p1 = Point32()
        p1.y = x1
        p1.x = y1
        poly.points.append(p0)
        poly.points.append(p1)

        polyStamped = PolygonStamped()
        polyStamped.header.frame_id = "base_link"
        polyStamped.polygon = poly

        self.line_pub.publish(polyStamped)
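The two x values above come from intersecting the fitted wall line $y = mx + c$ with the fan boundary rays $y = \pm\tan(\text{FAN\_ANGLE})\,x$:

$$\pm\tan(\alpha)\,x = mx + c \;\Rightarrow\; x = \frac{c}{\pm\tan(\alpha) - m},$$

after which the code swaps the axes when filling the Point32 messages for the base_link frame.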
Project: derplearning    Author: John-Ellis
def road_mapper(self, frame):

        road_spots = self.road_spotter(frame)

        road_map = np.zeros(np.shape(road_spots))  # allocated with the same shape as road_spots

        #First we deal with all the points
        road_map[:, 1, :] = self.camera_height * np.tan(self.camera_to_ground_arc + 
                    road_spots[:, 0, :] * self.camera_arc_x/self.crop_ratio[1])
        road_map[:, 0, :] = np.multiply( np.power( ( np.power(self.camera_height, 2) +
                     np.power(road_map[:, 1, :], 2) ), 0.5 ) , np.tan(self.camera_offset_y +
                      (road_spots[:, 1, :]-0.5)*self.camera_arc_y ) )

        return road_map

    # pilot_mk1 is currently only built for low speed operation
Project: pybot    Author: spillai
def __init__(self, pose, zmin=0.0, zmax=0.1, fov=np.deg2rad(60)): 
        # FoV derived from fx,fy,cx,cy=500,500,320,240
        # fovx, fovy = 65.23848614  51.28201165
        rx, ry = 0.638, 0.478

        self.pose = pose
        arr = [np.array([-rx, -ry, 1.]) * zmin,
               np.array([-rx,  ry, 1.]) * zmin,
               np.array([ rx,  ry, 1.]) * zmin,
               np.array([ rx, -ry, 1.]) * zmin,

               np.array([-rx, -ry, 1.]) * zmax,
               np.array([-rx,  ry, 1.]) * zmax,
               np.array([ rx,  ry, 1.]) * zmax,
               np.array([ rx, -ry, 1.]) * zmax]

        # vertices: nul, nll, nlr, nur, ful, fll, flr, fur
        self.vertices_ = self.pose * np.vstack(arr)

        # self.near, self.far = np.array([0,0,zmin]), np.array([0,0,zmax])
        # self.near_off, self.far_off = np.tan(fov / 2) * zmin, np.tan(fov / 2) * zmax

        # arr = [self.near + np.array([-1, -1, 0]) * self.near_off, 
        #        self.near + np.array([1, -1, 0]) * self.near_off, 
        #        self.near + np.array([1, 1, 0]) * self.near_off, 
        #        self.near + np.array([-1, 1, 0]) * self.near_off, 

        #        self.far + np.array([-1, -1, 0]) * self.far_off, 
        #        self.far + np.array([1, -1, 0]) * self.far_off, 
        #        self.far + np.array([1, 1, 0]) * self.far_off, 
        #        self.far + np.array([-1, 1, 0]) * self.far_off]

        # nll, nlr, nur, nul, fll, flr, fur, ful = self.pose * np.vstack(arr)
        # return nll, nlr, nur, nul, fll, flr, fur, ful
Project: pybot    Author: spillai
def from_calib_params_fov(cls, fov, cx, cy, D=np.zeros(5, dtype=np.float64), shape=None): 
        return cls(construct_K(cx / np.tan(fov), cy / np.tan(fov), cx, cy), D=D, shape=shape)
Project: demcoreg    Author: dshean
def bin_stats(x, y, stat='median', nbins=360):
    import scipy.stats
    #bin_range = (x.min(), x.max())
    bin_range = (0., 360.)
    bin_width = (bin_range[1] - bin_range[0]) / nbins
    print("Computing 1-degree bin statistics: %s" % stat)
    bin_stat, bin_edges, bin_number = scipy.stats.binned_statistic(x, y, \
            statistic=stat, bins=nbins, range=bin_range)
    bin_centers = bin_edges[:-1] + bin_width/2.
    bin_stat = np.ma.masked_invalid(bin_stat)

    """
    #Mask bins in grid directions, can potentially contain biased stats
    badbins = [0, 45, 90, 180, 225, 270, 315]
    bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
    bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
    """

    #Generate plots
    if False:
        plt.figure()
        #Need to pull out original values for each bin
        #Loop through unique bin numbers, pull out original values into list of lists
        #plt.boxplot(bin_stat, sym='')
        plt.xlim(*bin_range)
        plt.xticks(np.arange(bin_range[0],bin_range[1],30))
        plt.ylabel('dh/tan(slope) (m)')
        plt.xlabel('Aspect (1-deg bins)')

        plt.figure()
        plt.bar(bin_centers, bin_count)
        plt.xlim(*bin_range)
        plt.xticks(np.arange(bin_range[0],bin_range[1],30))
        plt.ylabel('Count')
        plt.xlabel('Aspect (1-deg bins)')
        plt.show()

    return bin_stat, bin_edges, bin_centers

#Function for fitting Nuth and Kaab (2011)
Project: NeoAnalysis    Author: neoanalysis
def pixelSize(self, pos):
        """
        Return the approximate size of a screen pixel at the location pos
        Pos may be a Vector or an (N,3) array of locations
        """
        cam = self.cameraPosition()
        if isinstance(pos, np.ndarray):
            cam = np.array(cam).reshape((1,)*(pos.ndim-1)+(3,))
            dist = ((pos-cam)**2).sum(axis=-1)**0.5
        else:
            dist = (pos-cam).length()
        xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)
        return xDist / self.width()
Project: NeoAnalysis    Author: neoanalysis
def pixelSize(self, pos):
        """
        Return the approximate size of a screen pixel at the location pos
        Pos may be a Vector or an (N,3) array of locations
        """
        cam = self.cameraPosition()
        if isinstance(pos, np.ndarray):
            cam = np.array(cam).reshape((1,)*(pos.ndim-1)+(3,))
            dist = ((pos-cam)**2).sum(axis=-1)**0.5
        else:
            dist = (pos-cam).length()
        xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)
        return xDist / self.width()
Project: deep-prior    Author: moberweger
def getInscribedRectangle(self, angle, rectSz):
        """
        From https://stackoverflow.com/questions/5789239/calculate-largest-rectangle-in-a-rotated-rectangle
        :param angle: angle in radians
        :param rectSz: rectangle size
        :return:
        """

        imgSzw = rectSz[0]
        imgSzh = rectSz[1]

        quadrant = int(numpy.floor(angle / (numpy.pi / 2.))) & 3
        sign_alpha = angle if (quadrant & 1) == 0 else numpy.pi - angle
        alpha = (sign_alpha % numpy.pi + numpy.pi) % numpy.pi

        bbw = imgSzw * numpy.cos(alpha) + imgSzh * numpy.sin(alpha)
        bbh = imgSzw * numpy.sin(alpha) + imgSzh * numpy.cos(alpha)

        gamma = numpy.arctan2(bbw, bbh) if imgSzw < imgSzh else numpy.arctan2(bbh, bbw)
        delta = numpy.pi - alpha - gamma

        length = imgSzh if imgSzw < imgSzh else imgSzw
        d = length * numpy.cos(alpha)
        a = d * numpy.sin(alpha) / numpy.sin(delta)

        y = a * numpy.cos(gamma)
        x = y * numpy.tan(gamma)

        return (int(x), int(y), int(x + bbw - 2*x), int(y + bbh - 2*y))
Project: onsager_deep_learning    Author: mborgerding
def hexagonal_uniform(N,as_complex=False):
    'returns uniformly distributed points of shape=(2,N) within a hexagon whose minimum radius is 1.0'
    phi = 2*pi/6 *.5
    S = np.array( [[1,1],np.tan([phi,-phi])] ) # vectors to vertices of next hexagon ( centered at (2,0) )
    # uniformly sample the parallelogram defined by the columns of S
    v = np.matmul(S,np.random.uniform(0,1,(2,N)))
    v[0] = 1 - abs(v[0]-1) # fold back to make a triangle
    c = (v[0] + 1j*v[1]) * np.exp( 2j*pi/6*np.floor( np.random.uniform(0,6,N) ) ) # rotate to a random sextant
    if as_complex:
        return c
    else:
        return np.array( (c.real,c.imag) )
Project: AgileTrainer    Author: diyjac
def calc_lookahead_offset(v_ego, angle_steers, d_lookahead, angle_offset=0):
    #*** this function returns the lateral offset given the steering angle, speed and the lookahead distance
    curvature = calc_curvature(v_ego, angle_steers, angle_offset)

    # clip is to avoid arcsin NaNs due to too sharp turns
    y_actual = d_lookahead * np.tan(np.arcsin(np.clip(d_lookahead * curvature, -0.999, 0.999))/2.)
    return y_actual, curvature
Project: radar    Author: amoose136
def test_basic_ufuncs(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.cos(x), cos(xm))
        assert_equal(np.cosh(x), cosh(xm))
        assert_equal(np.sin(x), sin(xm))
        assert_equal(np.sinh(x), sinh(xm))
        assert_equal(np.tan(x), tan(xm))
        assert_equal(np.tanh(x), tanh(xm))
        assert_equal(np.sqrt(abs(x)), sqrt(xm))
        assert_equal(np.log(abs(x)), log(xm))
        assert_equal(np.log10(abs(x)), log10(xm))
        assert_equal(np.exp(x), exp(xm))
        assert_equal(np.arcsin(z), arcsin(zm))
        assert_equal(np.arccos(z), arccos(zm))
        assert_equal(np.arctan(z), arctan(zm))
        assert_equal(np.arctan2(x, y), arctan2(xm, ym))
        assert_equal(np.absolute(x), absolute(xm))
        assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
        assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
        assert_equal(np.equal(x, y), equal(xm, ym))
        assert_equal(np.not_equal(x, y), not_equal(xm, ym))
        assert_equal(np.less(x, y), less(xm, ym))
        assert_equal(np.greater(x, y), greater(xm, ym))
        assert_equal(np.less_equal(x, y), less_equal(xm, ym))
        assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
        assert_equal(np.conjugate(x), conjugate(xm))
Project: radar    Author: amoose136
def test_testUfuncRegression(self):
        # Tests new ufuncs on MaskedArrays.
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor',
                  ]:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(numpy.ma.core, f)
            args = self.d[:uf.nin]
            ur = uf(*args)
            mr = mf(*args)
            assert_equal(ur.filled(0), mr.filled(0), f)
            assert_mask_equal(ur.mask, mr.mask, err_msg=f)
Project: radar    Author: amoose136
def test_testUfuncs1(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.cos(x), cos(xm)))
        self.assertTrue(eq(np.cosh(x), cosh(xm)))
        self.assertTrue(eq(np.sin(x), sin(xm)))
        self.assertTrue(eq(np.sinh(x), sinh(xm)))
        self.assertTrue(eq(np.tan(x), tan(xm)))
        self.assertTrue(eq(np.tanh(x), tanh(xm)))
        with np.errstate(divide='ignore', invalid='ignore'):
            self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
            self.assertTrue(eq(np.log(abs(x)), log(xm)))
            self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
        self.assertTrue(eq(np.exp(x), exp(xm)))
        self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
        self.assertTrue(eq(np.arccos(z), arccos(zm)))
        self.assertTrue(eq(np.arctan(z), arctan(zm)))
        self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
        self.assertTrue(eq(np.absolute(x), absolute(xm)))
        self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
        self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
        self.assertTrue(eq(np.less(x, y), less(xm, ym)))
        self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
        self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
        self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
        self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
        self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
        self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
        self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
        self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
Project: radar    Author: amoose136
def test_testUfuncRegression(self):
        f_invalid_ignore = [
            'sqrt', 'arctanh', 'arcsin', 'arccos',
            'arccosh', 'arctanh', 'log', 'log10', 'divide',
            'true_divide', 'floor_divide', 'remainder', 'fmod']
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor']:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(np.ma, f)
            args = self.d[:uf.nin]
            with np.errstate():
                if f in f_invalid_ignore:
                    np.seterr(invalid='ignore')
                if f in ['arctanh', 'log', 'log10']:
                    np.seterr(divide='ignore')
                ur = uf(*args)
                mr = mf(*args)
            self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
            self.assertTrue(eqmask(ur.mask, mr.mask))
Project: Sverchok    Author: Sverchok
def tangent(x: Number = 0.0) -> Number:
    return np.tan(x)
Project: python-gltf-experiments    Author: jzitelli
def calc_projection_matrix(camera):
    if 'perspective' in camera:
        f = 1 / np.tan(camera['perspective']['yfov'] / 2)
        znear, zfar = camera['perspective']['znear'], camera['perspective']['zfar']
        projection_matrix = np.array([[f / camera['perspective']['aspectRatio'], 0, 0, 0],
                                      [0, f, 0, 0],
                                      [0, 0, (znear + zfar) / (znear - zfar), 2 * znear * zfar / (znear - zfar)],
                                      [0, 0, -1, 0]], dtype=np.float32)
    elif 'orthographic' in camera:
        raise Exception('TODO')
    return projection_matrix
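A hedged usage sketch, assuming a glTF-style camera dict with the fields accessed above and yfov given in radians (hypothetical values):

camera = {'perspective': {'yfov': np.deg2rad(60.0),
                          'aspectRatio': 16.0 / 9.0,
                          'znear': 0.1, 'zfar': 100.0}}
P = calc_projection_matrix(camera)
# P[1, 1] = 1 / tan(30 deg) ~= 1.732 and P[0, 0] = P[1, 1] / aspectRatio ~= 0.974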
Project: semantic-segmentation    Author: albertbuchard
def mk_line(n1,theta):
    x = np.linspace(0,n1-1,n1)
    y = np.copy(x)*0
    count = 0
    for t in x:
        y[count] = t*np.tan(theta*np.pi/180.)
        count +=1

    return x,y-y[-1]/2
Project: astk    Author: openalea-incubator
def daylength(dayofyear, year, latitude):
    """ estimate of daylength"""

    lat = numpy.radians(latitude)
    dec = declination(12, dayofyear, year)
    return 12 + 24 / numpy.pi * numpy.arcsin(numpy.tan(lat) - numpy.tan(dec))