Python numpy module: gradient() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.gradient().
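Before the project examples, here is a minimal standalone sketch of the basic call, with invented arrays purely for illustration: np.gradient uses central differences in the interior and one-sided differences at the edges, optionally takes a scalar spacing or coordinate array per axis, and returns one derivative array per axis for multi-dimensional input.

import numpy as np

# 1D: central differences in the interior, one-sided differences at the edges
y = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
dy = np.gradient(y)           # unit spacing assumed -> [1. , 1.5, 2.5, 3.5, 4. ]
dy_h = np.gradient(y, 0.5)    # scalar sample spacing h = 0.5

# 2D: one derivative array per axis, in axis order (rows first, then columns)
z = np.arange(12, dtype=float).reshape(3, 4)
dz_rows, dz_cols = np.gradient(z)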

Project: spfeas    Author: jgrss
def get_mag_ang(img):

    """
    Gets image gradient (magnitude) and orientation (angle)

    Args:
        img

    Returns:
        Gradient, orientation
    """

    img = np.sqrt(img)

    gx = cv2.Sobel(np.float32(img), cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(np.float32(img), cv2.CV_32F, 0, 1)

    mag, ang = cv2.cartToPolar(gx, gy)

    return mag, ang, gx, gy
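For comparison with the cv2.Sobel version above, here is a rough numpy-only sketch of the same magnitude/orientation idea using np.gradient (central differences rather than Sobel kernels, so the numbers will differ; get_mag_ang_np is just an illustrative name, not part of spfeas):

import numpy as np

def get_mag_ang_np(img):
    # np.gradient returns derivatives in axis order, so the row (y) derivative comes first
    gy, gx = np.gradient(np.asarray(img, dtype=float))
    mag = np.hypot(gx, gy)        # gradient magnitude
    ang = np.arctan2(gy, gx)      # orientation in radians, (-pi, pi]
    return mag, ang, gx, gy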
Project: pytrip    Author: pytrip
def calculate_angle_quality_thread(self,
                                       voi,
                                       gantry,
                                       couch,
                                       calculate_from=0,
                                       stepsize=1.0,
                                       q=None,
                                       avoid=[],
                                       voi_cube=None,
                                       gradient=True):
        """ TODO: Documentation
        """
        os.nice(1)
        for couch_angle in couch:
            qual = self.calculate_angle_quality(voi, gantry, couch_angle, calculate_from, stepsize, avoid, voi_cube,
                                                gradient)
            q.put({"couch": couch_angle, "gantry": gantry, "data": qual})
Project: triflow    Author: locie
def test_model_monovariate(func, var, par, k):
    model = Model(func, var, par)
    x, dx = np.linspace(0, 10, 100, retstep=True, endpoint=False)
    U = np.cos(x * 2 * np.pi / 10)
    fields = model.fields_template(x=x, U=U)
    parameters = dict(periodic=True, k=k)
    F = model.F(fields, parameters)
    J_sparse = model.J(fields, parameters)
    J_dense = model.J(fields, parameters, sparse=False)
    J_approx = model.F.diff_approx(fields, parameters)

    dxU = np.gradient(np.pad(U, 2, mode='wrap')) / dx
    dxxU = np.gradient(dxU) / dx

    dxU = dxU[2:-2]
    dxxU = dxxU[2:-2]

    assert np.isclose(F, k * dxxU, atol=1E-3).all()
    assert np.isclose(J_approx, J_sparse.todense(), atol=1E-3).all()
    assert np.isclose(J_approx, J_dense, atol=1E-3).all()
Project: triflow    Author: locie
def test_model_bivariate(func, var, par):
    model = Model(func, var, par)
    x, dx = np.linspace(0, 10, 100, retstep=True, endpoint=False)
    u = np.cos(x * 2 * np.pi / 10)
    v = np.sin(x * 2 * np.pi / 10)
    fields = model.fields_template(x=x, u=u, v=v)
    parameters = dict(periodic=True, k1=1, k2=1)
    F = model.F(fields, parameters)
    J_sparse = model.J(fields, parameters)
    J_dense = model.J(fields, parameters, sparse=False)
    J_approx = model.F.diff_approx(fields, parameters)

    dxu = np.gradient(np.pad(u, 2, mode='wrap')) / dx
    dxxu = np.gradient(dxu) / dx
    dxu = dxu[2:-2]
    dxxu = dxxu[2:-2]

    dxv = np.gradient(np.pad(v, 2, mode='wrap')) / dx
    dxxv = np.gradient(dxv) / dx
    dxv = dxv[2:-2]
    dxxv = dxxv[2:-2]
    assert np.isclose(F, np.vstack([dxxv, dxxu]).flatten('F'),
                      atol=1E-3).all()
    assert np.isclose(J_approx, J_sparse.todense(), atol=1E-3).all()
    assert np.isclose(J_approx, J_dense, atol=1E-3).all()
Project: blmath    Author: bodylabs
def inflection_points(points, axis, span):
    '''
    Find the list of vertices that precede inflection points in a curve. The curve is differentiated
    with respect to the coordinate system defined by axis and span.

    axis: A vector representing the vertical axis of the coordinate system.
    span: A vector representing the horizontal axis of the coordinate system.

    returns: a list of points in space corresponding to the vertices that
    immediately precede inflection points in the curve
    '''

    coords_on_span = points.dot(span)
    dx = np.gradient(coords_on_span)
    coords_on_axis = points.dot(axis)

    # Take the second order finite difference of the curve with respect to the
    # defined coordinate system
    finite_difference_2 = np.gradient(np.gradient(coords_on_axis, dx), dx)

    # Compare the product of all neighboring pairs of points in the second derivative
    # If a pair of points has a negative product, then the second derivative changes sign
    # at one of those points, signalling an inflection point
    is_inflection_point = [finite_difference_2[i] * finite_difference_2[i + 1] <= 0 for i in range(len(finite_difference_2) - 1)]

    inflection_point_indices = [i for i, b in enumerate(is_inflection_point) if b]

    if len(inflection_point_indices) == 0: # pylint: disable=len-as-condition
        return []

    return points[inflection_point_indices]
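A small self-contained check of the same second-derivative sign-change idea on a 1D curve (the data and variable names here are invented, not blmath's API):

import numpy as np

x = np.linspace(-2.0, 2.0, 401)
y = x**3 - 3.0 * x                      # single inflection point at x = 0

# second derivative via two applications of np.gradient with explicit coordinates
d2y = np.gradient(np.gradient(y, x), x)

# a sign change in the second derivative between neighbours marks an inflection point
sign_change = d2y[:-1] * d2y[1:] <= 0
print(x[:-1][sign_change])              # values at/near 0.0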
Project: Lyssandra    Author: ektormak
def calculate_sift_grid(self, image, gridH, gridW):

        H, W = image.shape
        Npatches = gridH.size
        feaArr = np.zeros((Npatches, Nsamples * Nangles))

        # calculate gradient
        GH, GW = gen_dgauss(self.sigma)
        IH = signal.convolve2d(image, GH, mode='same')
        IW = signal.convolve2d(image, GW, mode='same')
        Imag = np.sqrt(IH ** 2 + IW ** 2)
        Itheta = np.arctan2(IH, IW)
        Iorient = np.zeros((Nangles, H, W))
        for i in range(Nangles):
            Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0)
        for i in range(Npatches):
            currFeature = np.zeros((Nangles, Nsamples))
            for j in range(Nangles):
                currFeature[j] = np.dot(self.weights, \
                                        Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten())
            feaArr[i] = currFeature.flatten()
        return feaArr
Project: Lyssandra    Author: ektormak
def extract_sift_patches(self, image, gridH, gridW):
        # extracts the sift descriptor of patches
        # in positions (gridH,gridW) in the image
        H, W = image.shape
        Npatches = gridH.size
        feaArr = np.zeros((Npatches, Nsamples * Nangles))

        # calculate gradient
        GH, GW = gen_dgauss(self.sigma)
        IH = signal.convolve2d(image, GH, mode='same')
        IW = signal.convolve2d(image, GW, mode='same')
        Imag = np.sqrt(IH ** 2 + IW ** 2)
        Itheta = np.arctan2(IH, IW)
        Iorient = np.zeros((Nangles, H, W))
        for i in range(Nangles):
            Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0)
        for i in range(Npatches):
            currFeature = np.zeros((Nangles, Nsamples))
            for j in range(Nangles):
                currFeature[j] = np.dot(self.weights, \
                                        Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten())
            feaArr[i] = currFeature.flatten()
        # feaArr contains each descriptor in a row
        feaArr = self.normalize_sift(feaArr)
        return feaArr
Project: krpcScripts    Author: jwvanderbeck
def test_specific_axes(self):
        # Testing that gradient can work on a given axis only
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x, axis=0), dx[0])
        assert_array_equal(gradient(x, axis=1), dx[1])
        assert_array_equal(gradient(x, axis=-1), dx[1])
        assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])

        # test axis=None which means all axes
        assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
        # and is the same as no axis keyword given
        assert_almost_equal(gradient(x, axis=None), gradient(x))

        # test vararg order
        assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
        # test maximal number of varargs
        assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)

        assert_raises(ValueError, gradient, x, axis=3)
        assert_raises(ValueError, gradient, x, axis=-3)
        assert_raises(TypeError, gradient, x, axis=[1,])
Project: LabelsManager    Author: SebastianoF
def contour_from_array_at_label_l(im_arr, l, thr=0.3, omit_axis=None, verbose=0):
    if verbose > 0:
        print(l)
    array_label_l = im_arr == l
    assert isinstance(array_label_l, np.ndarray)
    gra = np.gradient(array_label_l.astype(np.bool).astype(np.float64))
    if omit_axis is None:
        norm_gra = np.sqrt(gra[0] ** 2 + gra[1] ** 2 + gra[2] ** 2) > thr
    elif omit_axis == 'x':
        norm_gra = np.sqrt(gra[1] ** 2 + gra[2] ** 2) > thr
    elif omit_axis == 'y':
        norm_gra = np.sqrt(gra[0] ** 2 + gra[2] ** 2) > thr
    elif omit_axis == 'z':
        norm_gra = np.sqrt(gra[0] ** 2 + gra[1] ** 2) > thr
    else:
        raise IOError
    return norm_gra
Project: mrflow    Author: jswulff
def computeWeightsLocallyNormalized(I, centered_gradient=True, norm_radius=45):
    h,w = I.shape[:2]

    if centered_gradient:
        gy,gx = np.gradient(I)[:2]
        gysq = (gy**2).mean(axis=2) if gy.ndim > 2 else gy**2
        gxsq = (gx**2).mean(axis=2) if gx.ndim > 2 else gx**2

        gxsq_local_mean = cv2.blur(gxsq, ksize=(norm_radius, norm_radius))
        gysq_local_mean = cv2.blur(gysq, ksize=(norm_radius, norm_radius))

        w_horizontal = np.exp( - gxsq * 1.0/(2*np.maximum(1e-6, gxsq_local_mean)))
        w_vertical = np.exp( - gysq * 1.0/(2*np.maximum(1e-6, gysq_local_mean)))

    else:
        raise Exception("NotImplementedYet")

    return w_horizontal, w_vertical
Project: PyGEOMET    Author: pygeomet
def rel_vort(u,v,dx,dy):

    #get the gradient of the wind in the u- and v-directions
    du = np.gradient(u)
    dv = np.gradient(v)

    #compute the relative vorticity (units : 10^-5 s^-1)
    vort = ((dv[-1]/dx) - (du[-2]/dy))*pow(10,5) 

    #return the vorticity (Units: 10^-5 s^-1)
    return vort

###############################################################################
##########################  End function rel_vort()  ##########################
###############################################################################


#################### 25.Begin function of wind_chill()   ######################
## Required libraries: numpy                                                  #
##                                                                            #
## Inputs: t2m = ndarray of 2-meter temperature values (Units: F)             #
##                                                                            #
##         wind = ndarray of 10-meter bulk wind values (Units: MPH)           #
##                                                                            #
###############################################################################
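A quick numpy-only sanity check of the dv/dx - du/dy construction in rel_vort above, on a solid-body rotation where the relative vorticity is exactly 2*omega (the grid spacing and omega below are invented for the check):

import numpy as np

omega = 1.0e-4                           # rotation rate (s^-1)
dx = dy = 1000.0                         # grid spacing (m)
x = np.arange(0.0, 100000.0, dx)
y = np.arange(0.0, 100000.0, dy)
X, Y = np.meshgrid(x, y)                 # X varies along axis 1, Y along axis 0

u = -omega * Y                           # solid-body rotation
v = omega * X

du = np.gradient(u)                      # derivatives in grid-index units
dv = np.gradient(v)
vort = ((dv[-1] / dx) - (du[-2] / dy)) * 1.0e5   # same indexing as rel_vort
print(vort.mean())                       # ~ 2 * omega * 1e5 = 20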
Project: PyGEOMET    Author: pygeomet
def richardson(self):
        u = self.dataSet.readNCVariable('U')
        v = self.dataSet.readNCVariable('V')
        u_corr = wrf.unstaggerX(u)
        v_corr = wrf.unstaggerY(v)
        wind = (u_corr*u_corr + v_corr*v_corr)**(0.5)
        height = wrf.unstaggerZ(self.height)

        #compute the vertical gradient of theta
        dtheta = np.gradient(self.theta)[0]
        du = np.gradient(wind)[0]
        dz = np.gradient(height)[0]

        #compute the richardson number
        self.u10 = u_corr
        self.v10 = v_corr
        self.var = ((self.g/self.theta)*(dtheta/dz))/(du/dz)**(2)
        self.var2 = self.var
        self.varTitle = "Richardson Number\n" + self.dataSet.getTime()
        #Set short variable title for time series
        self.sTitle = "Richardson Number"
Project: wake-exchange    Author: byuflowlab
def smooth_abs(x, dx=0.01):
    """smoothed version of absolute vaue function, with quadratic instead of sharp bottom.
    Derivative w.r.t. variable of interest.  Width of quadratic can be controlled"""

    x, n = _checkIfFloat(x)

    y = np.abs(x)
    idx = np.logical_and(x > -dx, x < dx)
    y[idx] = x[idx]**2/(2.0*dx) + dx/2.0

    # gradient
    dydx = np.ones_like(x)
    dydx[x <= -dx] = -1.0
    dydx[idx] = x[idx]/dx


    if n == 1:
        y = y[0]
        dydx = dydx[0]

    return y, dydx
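A quick check of the analytic dydx above against a purely numerical derivative from np.gradient. The smoothing is re-stated inline rather than calling smooth_abs, because its _checkIfFloat helper is not part of this excerpt; the grid and width are invented:

import numpy as np

dx_width = 0.01
x = np.linspace(-0.05, 0.05, 1001)

# same smoothing as smooth_abs: quadratic inside |x| < dx_width, |x| outside
y = np.where(np.abs(x) < dx_width, x**2 / (2.0 * dx_width) + dx_width / 2.0, np.abs(x))
dydx_analytic = np.where(np.abs(x) < dx_width, x / dx_width, np.sign(x))

dydx_numeric = np.gradient(y, x)
print(np.max(np.abs(dydx_numeric - dydx_analytic)))   # small (central-difference error)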
Project: lambda-numba    Author: rlhotovy
def test_specific_axes(self):
        # Testing that gradient can work on a given axis only
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x, axis=0), dx[0])
        assert_array_equal(gradient(x, axis=1), dx[1])
        assert_array_equal(gradient(x, axis=-1), dx[1])
        assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])

        # test axis=None which means all axes
        assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
        # and is the same as no axis keyword given
        assert_almost_equal(gradient(x, axis=None), gradient(x))

        # test vararg order
        assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
        # test maximal number of varargs
        assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)

        assert_raises(ValueError, gradient, x, axis=3)
        assert_raises(ValueError, gradient, x, axis=-3)
        assert_raises(TypeError, gradient, x, axis=[1,])
Project: computer-vision-algorithms    Author: aleju
def main():
    """Load image, compute derivatives, plot."""
    img = data.camera()
    imgy_np, imgx_np = np.gradient(img)
    imgx_ski, imgy_ski = _compute_derivatives(img)

    # dx
    util.plot_images_grayscale(
        [img, dx_symmetric(img), dx_forward(img), dx_backward(img), imgx_np, imgx_ski],
        ["Image", "dx (symmetric)", "dx (forward)", "dx (backward)",
         "Ground Truth (numpy)", "Ground Truth (scikit-image)"]
    )

    # dy
    util.plot_images_grayscale(
        [img, dy_symmetric(img), dy_forward(img), dy_backward(img), imgy_np, imgy_ski],
        ["Image", "dy (symmetric)", "dy (forward)", "dy (backward)",
         "Ground Truth (numpy)", "Ground Truth (scikit-image)"]
    )
Project: deliver    Author: orchestor
def test_specific_axes(self):
        # Testing that gradient can work on a given axis only
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x, axis=0), dx[0])
        assert_array_equal(gradient(x, axis=1), dx[1])
        assert_array_equal(gradient(x, axis=-1), dx[1])
        assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])

        # test axis=None which means all axes
        assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
        # and is the same as no axis keyword given
        assert_almost_equal(gradient(x, axis=None), gradient(x))

        # test vararg order
        assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
        # test maximal number of varargs
        assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)

        assert_raises(ValueError, gradient, x, axis=3)
        assert_raises(ValueError, gradient, x, axis=-3)
        assert_raises(TypeError, gradient, x, axis=[1,])
Project: crowddynamics    Author: jaantollander
def direction_map(dmap: DistanceMap):
    r"""Computes normalized gradient of distance map. Not defined when length of
    the gradient is zero.

    .. math::
       \hat{\mathbf{e}}_{S} = -\frac{\nabla S(\mathbf{x})}{\| \nabla S(\mathbf{x}) \|}

    Args:
        dmap (numpy.ndarray):
            Distance map.

    Returns:
        DirectionMap: Direction map.
    """
    u, v = np.gradient(dmap)
    l = np.hypot(u, v)
    # Avoids zero division
    l[l == 0] = np.nan
    # Flip order from (row, col) to (x, y)
    return v / l, u / l


# Potentials
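A minimal check of the gradient-normalization step in direction_map on a synthetic radial distance map. The DistanceMap/DirectionMap types from crowddynamics are not used here, and the expression follows the code above rather than the docstring's formula:

import numpy as np

grid_y, grid_x = np.mgrid[-50:51, -50:51].astype(float)
dmap = np.hypot(grid_x, grid_y)          # distance to the origin

u, v = np.gradient(dmap)
l = np.hypot(u, v)
l[l == 0] = np.nan                       # avoid division by zero at the center
dir_x, dir_y = v / l, u / l              # (x, y) order, as in direction_map

# away from the center every direction vector has unit length
print(np.nanmax(np.abs(np.hypot(dir_x, dir_y) - 1.0)))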
Project: pyNMR    Author: bennomeier
def __entropy(self, spectrum, m):
        """Calculates get m-th derivative of the real part of spectrum and
        returns entropy of its absolute value. """
        assert type(m) is int, 'm should be a (possitive) integer'
        assert m > 0, 'need m > 0'

        #get the real part of the spectrum
        spect = np.array(spectrum)
        spect = spect.real

        # calculate the m-th derivative of the real part of the spectrum
        spectrumDerivative = spect
        for i in range(m):
            spectrumDerivative = np.gradient(spectrumDerivative)

        # now get the entropy of the absolute value of the m-th derivative:
        entropy = sp.stats.entropy(np.abs(spectrumDerivative))
        return entropy
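The same idea as a standalone sketch (m-th numerical derivative, then Shannon entropy of its absolute value) on a synthetic Lorentzian-like line; derivative_entropy is an illustrative name, not pyNMR's API:

import numpy as np
import scipy.stats

def derivative_entropy(spectrum, m=1):
    # m-th derivative of the real part, taken numerically with np.gradient
    deriv = np.real(np.asarray(spectrum))
    for _ in range(m):
        deriv = np.gradient(deriv)
    # entropy of the absolute value of the derivative
    return scipy.stats.entropy(np.abs(deriv))

x = np.linspace(-10.0, 10.0, 2048)
spectrum = 1.0 / (1.0 + x**2)
print(derivative_entropy(spectrum, m=1))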
Project: perception    Author: BerkeleyAutomation
def threshold_gradients(self, grad_thresh):
        """Creates a new DepthImage by zeroing out all depths
        where the magnitude of the gradient at that point is
        greater than grad_thresh.

        Parameters
        ----------
        grad_thresh : float
            A threshold for the gradient magnitude.

        Returns
        -------
        :obj:`DepthImage`
            A new DepthImage created from the thresholding operation.
        """
        data = np.copy(self._data)
        gx, gy = self.gradients()
        gradients = np.zeros([gx.shape[0], gx.shape[1], 2])
        gradients[:, :, 0] = gx
        gradients[:, :, 1] = gy
        gradient_mags = np.linalg.norm(gradients, axis=2)
        ind = np.where(gradient_mags > grad_thresh)
        data[ind[0], ind[1]] = 0.0
        return DepthImage(data, self._frame)
Project: perception    Author: BerkeleyAutomation
def normal_cloud_im(self):
        """Generate a NormalCloudImage from the PointCloudImage.

        Returns
        -------
        :obj:`NormalCloudImage`
            The corresponding NormalCloudImage.
        """
        gx, gy, _ = np.gradient(self.data)
        gx_data = gx.reshape(self.height * self.width, 3)
        gy_data = gy.reshape(self.height * self.width, 3)
        pc_grads = np.cross(gx_data, gy_data)  # default to point toward camera
        pc_grad_norms = np.linalg.norm(pc_grads, axis=1)
        normal_data = pc_grads / np.tile(pc_grad_norms[:, np.newaxis], [1, 3])
        normal_im_data = normal_data.reshape(self.height, self.width, 3)
        return NormalCloudImage(normal_im_data, frame=self.frame)
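A numpy-only sketch of the same cross-product-of-gradients idea on a synthetic (H, W, 3) point grid that samples a tilted plane, where the normal is known analytically (the perception image classes are not used):

import numpy as np

h, w = 64, 64
row, col = np.mgrid[0:h, 0:w].astype(float)
pts = np.dstack([col, row, 0.1 * col + 0.2 * row])     # plane z = 0.1*x + 0.2*y

gx, gy, _ = np.gradient(pts)                           # derivatives along rows, columns, channels
normals = np.cross(gx.reshape(-1, 3), gy.reshape(-1, 3))
normals /= np.linalg.norm(normals, axis=1, keepdims=True)

n_true = np.array([0.1, 0.2, -1.0])
n_true /= np.linalg.norm(n_true)
print(np.allclose(np.abs(normals @ n_true), 1.0))      # every normal matches up to sign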
Project: neurotools    Author: michaelerule
def visualize_derivatives(image):
    '''
    Plot gradient on left and Laplacian on right.
    Only tested on 2D 1-channel float images
    '''
    dx1,dy1 = np.gradient(image)
    gradient = dx1 + 1j*dy1
    a1 = np.abs(gradient)
    plt.figure(None,(12,6))
    plt.subplot(121)
    a1 = mean_center(blur(exposure.equalize_hist(unitize(a1)),1))
    plt.imshow(a1,
        origin='lower',interpolation='nearest',cmap='gray',extent=(0,64,)*2)
    plt.title('Gradient Magnitude')
    plt.subplot(122)
    laplacian = scipy.ndimage.filters.laplace(image)
    lhist = mean_center(
        blur(exposure.equalize_hist(unitize(laplacian)),1))
    plt.imshow(lhist,
        origin='lower',interpolation='nearest',cmap='gray',extent=(0,64,)*2)
    plt.title('Laplacian')
    return gradient, laplacian
Project: Alfred    Author: jkachhadia
def test_specific_axes(self):
        # Testing that gradient can work on a given axis only
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x, axis=0), dx[0])
        assert_array_equal(gradient(x, axis=1), dx[1])
        assert_array_equal(gradient(x, axis=-1), dx[1])
        assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])

        # test axis=None which means all axes
        assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
        # and is the same as no axis keyword given
        assert_almost_equal(gradient(x, axis=None), gradient(x))

        # test vararg order
        assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
        # test maximal number of varargs
        assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)

        assert_raises(ValueError, gradient, x, axis=3)
        assert_raises(ValueError, gradient, x, axis=-3)
        assert_raises(TypeError, gradient, x, axis=[1,])
Project: pypiv    Author: jr7
def _get_displacement_function(self, f):
        """
        Getter function for calculating the displacement.

        :param f: field that is used for the displacement, mainly velocity components
        :returns: function of the Taylor expanded field to first order
        """
        dx = self._distance
        f_x,  f_y  = np.gradient(f  , dx)
        f_xx, f_xy = np.gradient(f_x, dx)
        f_yx, f_yy = np.gradient(f_y, dx)
        return lambda i, j, x, y : (f[i, j] + x*f_x[i, j]  + y*f_y[i, j]
                       + 0.5*(f_xx[i, j]*x**2 + 2*f_xy[i, j]*x*y + f_yy[i, j]*y**2))

    #For the bilinear method the built-in scipy method `map_coordinates <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.map_coordinates.html>`_ is used with *order* set to 1.
Project: prysm    Author: brandondube
def psf_shift(lenslet_efl, dx, dy, mag=1):
    ''' Computes the shift of a PSF, in microns.

    Args:
        lenslet_efl (`microns`): EFL of lenslets.

        dx (`np.ndarray`): dx gradient of wavefront.

        dy (`np.ndarray`): dy gradient of wavefront.

        mag (`float`): magnification of the collimation system.

    Returns:
        `numpy.ndarray`: array of PSF shifts.

    Notes:
        see eq. 12 of Chanan, "Principles of Wavefront Sensing and Reconstruction"
        delta = m * fl * grad(z)
        m is magnification of SH system
        fl is lenslet focal length
        grad(z) is the x, y gradient of the opd, z, which is expressed in
        radians.

    '''
    coef = -mag * lenslet_efl
    return coef * dx, coef * dy
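A rough usage sketch feeding psf_shift (defined above) with gradients obtained from np.gradient on a synthetic tilted wavefront; the sample spacing, tilt, and focal length are invented:

import numpy as np

sample_spacing = 10.0                             # microns between wavefront samples
row, col = np.mgrid[0:32, 0:32].astype(float)
opd = 1.0e-3 * (col * sample_spacing)             # pure x-tilt, in radians

dy_grad, dx_grad = np.gradient(opd, sample_spacing)    # d(opd)/dy, d(opd)/dx
shift_x, shift_y = psf_shift(lenslet_efl=10000.0, dx=dx_grad, dy=dy_grad)
print(shift_x.mean(), shift_y.mean())             # constant shift in x, zero in y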
Project: radar    Author: amoose136
def test_basic(self):
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x), dx)
        assert_array_equal(gradient(v), dx)
Project: radar    Author: amoose136
def test_badargs(self):
        # for 2D array, gradient can take 0, 1, or 2 extra args
        x = np.array([[1, 1], [3, 4]])
        assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
                      np.array([1., 1.]), np.array([1., 1.]))
Project: radar    Author: amoose136
def test_masked(self):
        # Make sure that gradient supports subclasses like masked arrays
        x = np.ma.array([[1, 1], [3, 4]],
                        mask=[[False, False], [False, False]])
        out = gradient(x)[0]
        assert_equal(type(out), type(x))
        # And make sure that the output and input don't have aliased mask
        # arrays
        assert_(x.mask is not out.mask)
        # Also check that edge_order=2 doesn't alter the original mask
        x2 = np.ma.arange(5)
        x2[2] = np.ma.masked
        np.gradient(x2, edge_order=2)
        assert_array_equal(x2.mask, [False, False, True, False, False])
Project: radar    Author: amoose136
def test_datetime64(self):
        # Make sure gradient() can handle special types like datetime64
        x = np.array(
            ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
             '1910-10-12', '1910-12-12', '1912-12-12'],
            dtype='datetime64[D]')
        dx = np.array(
            [-5, -3, 0, 31, 61, 396, 731],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))
Project: radar    Author: amoose136
def test_timedelta64(self):
        # Make sure gradient() can handle special types like timedelta64
        x = np.array(
            [-5, -3, 10, 12, 61, 321, 300],
            dtype='timedelta64[D]')
        dx = np.array(
            [2, 7, 7, 25, 154, 119, -21],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))
Project: radar    Author: amoose136
def test_specific_axes(self):
        # Testing that gradient can work on a given axis only
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x, axis=0), dx[0])
        assert_array_equal(gradient(x, axis=1), dx[1])
        assert_array_equal(gradient(x, axis=-1), dx[1])
        assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])

        # test axis=None which means all axes
        assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
        # and is the same as no axis keyword given
        assert_almost_equal(gradient(x, axis=None), gradient(x))

        # test vararg order
        assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
        # test maximal number of varargs
        assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)

        assert_raises(ValueError, gradient, x, axis=3)
        assert_raises(ValueError, gradient, x, axis=-3)
        assert_raises(TypeError, gradient, x, axis=[1,])
Project: amset    Author: hackingmaterials
def calc_v_vec(self, tp):
        v_vec_all_bands = []
        for ib in range(self.num_bands[tp]):
            v_vec_k_ordered = self.velocity_signed[tp][ib][self.pos_idx[tp]]
            v_vec_all_bands.append(self.grid_from_ordered_list(v_vec_k_ordered, tp, none_missing=True))
        return np.array(v_vec_all_bands)

    # def calc_v_vec(self, tp):
    #     # TODO: Take into account the fact that this gradient is found in three directions specified by the lattice, not
    #     # the x, y, and z directions. It must be corrected to account for this.
    #     energy_grid = self.array_from_kgrid('energy', tp)
    #     # print('energy:')
    #     # np.set_printoptions(precision=3)
    #     # print(energy_grid[0,:,:,:,0])
    #     N = self.kgrid_array[tp].shape
    #     k_grid = self.kgrid_array[tp]
    #     v_vec_result = []
    #     for ib in range(self.num_bands[tp]):
    #         v_vec = np.gradient(energy_grid[ib][:,:,:,0], k_grid[:,0,0,0] * self._rec_lattice.a, k_grid[0,:,0,1] * self._rec_lattice.b, k_grid[0,0,:,2] * self._rec_lattice.c)
    #         v_vec_rearranged = np.zeros((N[0], N[1], N[2], 3))
    #         for i in range(N[0]):
    #             for j in range(N[1]):
    #                 for k in range(N[2]):
    #                     v_vec_rearranged[i,j,k,:] = np.array([v_vec[0][i,j,k], v_vec[1][i,j,k], v_vec[2][i,j,k]])
    #         v_vec_rearranged *= A_to_m * m_to_cm / hbar
    #         v_vec_result.append(v_vec_rearranged)
    #     return np.array(v_vec_result)


    # turns a kgrid property into a list of grid arrays of that property for k integration
Project: pyqha    Author: mauropalumbo75
def compute_beta(TT, minT):
    """
    This function computes the volumetric thermal expansion as a numerical
    derivative of the volume as a function of temperature V(T) given in the
    input array *minT*. This array can be obtained from the free energy
    minimization, which should be done beforehand.
    """
    grad=np.gradient(np.array(minT))  # numerical derivatives with numpy
    betaT = np.array(grad)  # grad contains the derivatives with respect to T
                                # also convert to np.array format    
    return betaT/minT
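A quick numeric check of the beta = (1/V) dV/dT construction used in compute_beta, with a toy linear V(T); the temperature grid has unit spacing, matching the implicit assumption of np.gradient above, and the coefficient and volume are invented:

import numpy as np

T = np.arange(0.0, 1001.0)               # K, unit step
a = 3.0e-5                               # K^-1
minT = 100.0 * (1.0 + a * T)             # toy V(T), linear in T

betaT = np.gradient(minT) / minT         # same expression as compute_beta
print(betaT[500], a / (1.0 + a * T[500]))   # both ~ 2.956e-5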
Project: pyqha    Author: mauropalumbo75
def compute_alpha(minT,ibrav):
    """
    This function calculates the thermal expansion alphaT at different temperatures
    from the input minT matrix by computing the numerical derivatives with numpy.
    The input matrix minT has shape nT*6, where the first index is the temperature 
    and the second the lattice parameter. For example, minT[i,0] and minT[i,2] are
    the lattice parameters a and c at the temperature i.

    More ibrav types must be implemented
    """

    grad=np.gradient(np.array(minT))  # numerical derivatives with numpy
    alphaT = np.array(grad[0])  # grad[0] contains the derivatives with respect to T, which is the first axis in minT
                                # also convert to np.array format

    # now normalize the alpha properly. It must be different for different ibrav
    # to avoid a divide by 0 error (minT is zero for lattice parameters not defined
    # in the system)
    if ibrav==4:
        alphaT[:,0] = alphaT[:,0]/minT[:,0]
        alphaT[:,2] = alphaT[:,2]/minT[:,2]

    return alphaT


################################################################################
Project: DVH-Analytics    Author: cutright
def calc_eud(dvh, a):
    v = -np.gradient(dvh)

    dose_bins = np.linspace(1, np.size(dvh), np.size(dvh))
    dose_bins = np.round(dose_bins, 3)
    bin_centers = dose_bins - 0.5
    eud = np.power(np.sum(np.multiply(v, np.power(bin_centers, a))), 1 / float(a))
    eud = np.round(eud, 2) * 0.01

    return eud
Project: meshpy    Author: BerkeleyAutomation
def _compute_gradients(self):
        """Computes the gradients of the SDF.

        Returns
        -------
        :obj:`list` of :obj:`numpy.ndarray` of float
            A list of ndarrays of the same dimension as the SDF. The arrays
            are in axis order and specify the gradients for that axis
            at each point.
        """
        self.gradients_ = np.gradient(self.data_)
Project: meshpy    Author: BerkeleyAutomation
def curvature(self, coords, delta=0.001):
        """
        Returns an approximation to the local SDF curvature (Hessian) at the
        given coordinate in grid basis.

        Parameters
        ---------
        coords : numpy 3-vector
            the grid coordinates at which to get the curvature

        Returns
        -------
        curvature : 3x3 ndarray of the curvature at the surface points
        """
        # perturb local coords
        coords_x_up   = coords + np.array([delta, 0, 0])
        coords_x_down = coords + np.array([-delta, 0, 0])
        coords_y_up   = coords + np.array([0, delta, 0])
        coords_y_down = coords + np.array([0, -delta, 0])
        coords_z_up   = coords + np.array([0, 0, delta])
        coords_z_down = coords + np.array([0, 0, -delta])

        # get gradient
        grad_x_up = self.gradient(coords_x_up)
        grad_x_down = self.gradient(coords_x_down)
        grad_y_up = self.gradient(coords_y_up)
        grad_y_down = self.gradient(coords_y_down)
        grad_z_up = self.gradient(coords_z_up)
        grad_z_down = self.gradient(coords_z_down)

        # finite differences
        curvature_x = (grad_x_up - grad_x_down) / (4 * delta)
        curvature_y = (grad_y_up - grad_y_down) / (4 * delta)
        curvature_z = (grad_z_up - grad_z_down) / (4 * delta)
        curvature = np.c_[curvature_x, np.c_[curvature_y, curvature_z]]
        curvature = curvature + curvature.T
        return curvature
Project: tadtool    Author: vaquerizaslab
def call_tad_borders(ii_results, cutoff=0):
    """
    Calls TAD borders using the first derivative of the insulation index.

    :param ii_results: (raw) insulation index results, numpy vector
    :param cutoff: raw insulation index threshold for "true" TAD boundaries
    """
    tad_borders = []
    g = np.gradient(ii_results)
    for i in range(len(ii_results)):
        border_type = _border_type(i, g)
        if border_type == 1 and ii_results[i] <= cutoff:
            tad_borders.append(i)
    return tad_borders
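The _border_type helper is not included in this excerpt; as a rough guess at the underlying idea, here is a standalone sketch that marks positions where the first derivative of a toy insulation index crosses zero from below (a local minimum) and the raw value is at or below the cutoff:

import numpy as np

ii = np.cos(np.linspace(0.0, 4.0 * np.pi, 200))      # toy insulation index with two minima
cutoff = 0
g = np.gradient(ii)

candidates = [i for i in range(1, len(g)) if g[i - 1] < 0 <= g[i]]
borders = [i for i in candidates if ii[i] <= cutoff]
print(borders)                                       # indices near the two cosine minima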
Project: tda-image-analysis    Author: rachellevanger
def orientation_field(bmp, sigma=3):
    # Author: Shaun Harker, 2016
    # Based on algorithm by Bazen and Gerez from "Systematic methods for the 
    # computation of the directional fields and singular points of fingerprints," 2002.
    """
    Computes the orientation field (result everywhere between -pi/2 and pi/2)
    from the gradient of the given image.
    """
    u = bmp.astype(float)
    du = np.gradient(u)
    [ux, uy] = du
    Y = scipy.ndimage.filters.gaussian_filter(2.0*ux*uy, sigma=sigma)
    X = scipy.ndimage.filters.gaussian_filter(ux**2.0 - uy**2.0, sigma=sigma)
    return .5 * np.arctan2(Y, X)
Project: pytrip    Author: pytrip
def calculate_quality_grid(self, voi, gantry, couch, calculate_from=0, stepsize=1.0, avoid=[], gradient=True):
        """ TODO: Documentation
        """
        result = self.calculate_quality_list(voi, gantry, couch, calculate_from, stepsize,
                                             avoid=avoid, gradient=gradient)
        result = sorted(result, key=cmp_to_key(cmp_sort))
        grid_data = []
        for x in result:
            grid_data.append(x["data"][0])
        result = np.reshape(grid_data, (len(gantry), len(couch)))
        return result
Project: pytrip    Author: pytrip
def calculate_quality_list(self, voi, gantry, couch, calculate_from=0, stepsize=1.0, avoid=[], gradient=True):
        """ TODO: Documentation
        """
        q = Queue(32767)
        process = []
        d = voi.get_voi_cube()
        d.cube = np.array(d.cube, dtype=np.float32)
        voi_cube = DensityProjections(d)
        result = []
        for gantry_angle in gantry:
            p = Process(
                target=self.calculate_angle_quality_thread,
                args=(voi, gantry_angle, couch, calculate_from, stepsize, q, avoid, voi_cube, gradient))
            p.start()
            p.daemon = True
            process.append(p)
            if len(process) > 2:
                tmp = q.get()
                result.append(tmp)
                for p in process:
                    if not p.is_alive():
                        process.remove(p)
        while not len(result) == len(gantry) * len(couch):
            tmp = q.get()
            result.append(tmp)
        return result
Project: pytrip    Author: pytrip
def calculate_angle_quality(self,
                                voi,
                                gantry,
                                couch,
                                calculate_from=0,
                                stepsize=1.0,
                                avoid=[],
                                voi_cube=None,
                                gradient=True):
        """
        Calculates a quality index for a given gantry/couch combination.
        """
        voi_min, voi_max = voi.get_min_max()
        for v in avoid:
            v_min, v_max = v.get_min_max()
        if voi_cube is None:
            d = voi.get_voi_cube()
            d.cube = np.array(d.cube, dtype=np.float32)
            voi_cube = DensityProjections(d)

        data, start, basis = self.calculate_projection(voi, gantry, couch, calculate_from, stepsize)
        voi_proj, t1, t2 = voi_cube.calculate_projection(voi, gantry, couch, 1, stepsize)

        if gradient:
            gradient = np.gradient(data)
            data = (gradient[0]**2 + gradient[1]**2)**0.5
        a = data * (voi_proj > 0.0)
        quality = sum(a)
        area = sum(voi_proj > 0.0)
        # ~ area = sum(data>0.0)/10
        if gradient:
            mean_quality = 10 - abs(quality / area)
        else:
            mean_quality = abs(quality / area)
        return mean_quality, quality, area
Project: nelpy    Author: nelpy
def get_direction(asa, *, sigma=None):
    """Return epochs during which an animal was running left to right, or right
    to left.

    Parameters
    ----------
    asa : AnalogSignalArray 1D
        AnalogSignalArray containing the 1D position data.
    sigma : float, optional
        Smoothing to apply to position (x) before computing gradient estimate.
        Default is 0.

    Returns
    -------
    l2r, r2l : EpochArrays
        EpochArrays corresponding to left-to-right and right-to-left movement.
    """
    if sigma is None:
        sigma = 0
    if not isinstance(asa, core.AnalogSignalArray):
        raise TypeError('AnalogSignalArray expected!')
    assert asa.n_signals == 1, "1D AnalogSignalArray expected!"

    direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
                                       rectify=False).ydata
    direction[direction>=0] = 1
    direction[direction<0] = -1
    direction = direction.squeeze()

    l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
    l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
    l2r = core.EpochArray(asa.time[l2r])

    r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
    r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
    r2l = core.EpochArray(asa.time[r2l])

    return l2r, r2l
Project: DrosophilaCooperative    Author: avaccari
def hessian(array):
    (dy, dx) = np.gradient(array)
    (dydy, dxdy) = np.gradient(dy)
    (dydx, dxdx) = np.gradient(dx)
    return np.dstack((dxdx, dydx, dxdy, dydy))
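A quick consistency check of this nested-gradient Hessian on a quadratic, where the true Hessian is constant (the grid and coefficients are invented); note the stacking order returned above is (dxdx, dydx, dxdy, dydy):

import numpy as np

row, col = np.mgrid[0:50, 0:50].astype(float)
f = 2.0 * col**2 + 3.0 * col * row + 5.0 * row**2      # fxx = 4, fxy = 3, fyy = 10

H = hessian(f)                  # uses the function above; shape (50, 50, 4)
print(H[25, 25])                # interior point: approximately [ 4.  3.  3. 10.]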
Project: Lyssandra    Author: ektormak
def gen_dgauss(sigma):
    '''
    Gradient of the Gaussian in both directions.
    '''
    fwid = np.int(2 * np.ceil(sigma))
    G = np.array(range(-fwid, fwid + 1)) ** 2
    G = G.reshape((G.size, 1)) + G
    G = np.exp(- G / 2.0 / sigma / sigma)
    G /= np.sum(G)
    GH, GW = np.gradient(G)
    GH *= 2.0 / np.sum(np.abs(GH))
    GW *= 2.0 / np.sum(np.abs(GW))
    return GH, GW
Project: elfi    Author: elfi-dev
def gradient_pdf(self, x):
        """Return the gradient of density of the joint prior at x."""
        raise NotImplementedError
Project: elfi    Author: elfi-dev
def gradient_logpdf(self, x, stepsize=None):
        """Return the gradient of log density of the joint prior at x.

        Parameters
        ----------
        x : float or np.ndarray
        stepsize : float or list
            Stepsize or stepsizes for the dimensions

        """
        x = np.asanyarray(x)
        ndim = x.ndim
        x = x.reshape((-1, self.dim))

        grads = np.zeros_like(x)

        for i in range(len(grads)):
            xi = x[i]
            grads[i] = numgrad(self.logpdf, xi, h=stepsize)

        grads[np.isinf(grads)] = 0
        grads[np.isnan(grads)] = 0

        if ndim == 0 or (ndim == 1 and self.dim > 1):
            grads = grads[0]
        return grads
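The numgrad helper used above is not part of this excerpt. As a related numpy-only check of differentiating a log density numerically, np.gradient on a grid recovers the known score of a standard normal, d/dx log N(x | 0, 1) = -x:

import numpy as np
import scipy.stats

x = np.linspace(-3.0, 3.0, 601)
logpdf = scipy.stats.norm.logpdf(x)
score = np.gradient(logpdf, x)                  # numerical d/dx log p(x)
print(np.allclose(score[1:-1], -x[1:-1]))       # True: exact in the interior for a quadratic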
Project: krpcScripts    Author: jwvanderbeck
def test_basic(self):
        v = [[1, 1], [3, 4]]
        x = np.array(v)
        dx = [np.array([[2., 3.], [2., 3.]]),
              np.array([[0., 0.], [1., 1.]])]
        assert_array_equal(gradient(x), dx)
        assert_array_equal(gradient(v), dx)
Project: krpcScripts    Author: jwvanderbeck
def test_badargs(self):
        # for 2D array, gradient can take 0, 1, or 2 extra args
        x = np.array([[1, 1], [3, 4]])
        assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
                      np.array([1., 1.]), np.array([1., 1.]))
Project: krpcScripts    Author: jwvanderbeck
def test_masked(self):
        # Make sure that gradient supports subclasses like masked arrays
        x = np.ma.array([[1, 1], [3, 4]],
                        mask=[[False, False], [False, False]])
        out = gradient(x)[0]
        assert_equal(type(out), type(x))
        # And make sure that the output and input don't have aliased mask
        # arrays
        assert_(x.mask is not out.mask)
        # Also check that edge_order=2 doesn't alter the original mask
        x2 = np.ma.arange(5)
        x2[2] = np.ma.masked
        np.gradient(x2, edge_order=2)
        assert_array_equal(x2.mask, [False, False, True, False, False])
Project: krpcScripts    Author: jwvanderbeck
def test_datetime64(self):
        # Make sure gradient() can handle special types like datetime64
        x = np.array(
            ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
             '1910-10-12', '1910-12-12', '1912-12-12'],
            dtype='datetime64[D]')
        dx = np.array(
            [-5, -3, 0, 31, 61, 396, 731],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))