Python numpy module: absolute() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.absolute().
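Before the project snippets, here is a minimal standalone sketch of what numpy.absolute computes (the array values below are purely illustrative):

import numpy as np

x = np.array([-1.2, 1.2, -3 + 4j])
print(np.absolute(x))  # [1.2 1.2 5. ] -- elementwise magnitude; for complex a+bj this is sqrt(a**2 + b**2)
print(np.abs(x))       # np.abs is an alias for np.absolute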

Project: imgpedia    Author: scferrada
def compute(self, frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        descriptor = []
        dominantGradients = np.zeros_like(frame)
        maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
        maxGradient = np.absolute(maxGradient)
        for k in range(1,len(self.kernels)):
            kernel = self.kernels[k]
            gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
            gradient = np.absolute(gradient)
            np.maximum(maxGradient, gradient, maxGradient)
            indices = (maxGradient == gradient)
            dominantGradients[indices] = k

        frameH, frameW = frame.shape
        for row in range(self.rows):
            for col in range(self.cols):
                mask = np.zeros_like(frame)
                mask[((frameH//self.rows)*row):((frameH//self.rows)*(row+1)), ((frameW//self.cols)*col):((frameW//self.cols)*(col+1))] = 255  # integer division so slice indices are ints (Python 3)
                hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
                hist = cv2.normalize(hist, None)
                descriptor.append(hist)
        return np.concatenate(descriptor)
Project: kaggle_dsb2017    Author: astoc
def world_2_voxel(world_coordinates, origin, spacing):
    stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
    voxel_coordinates = stretched_voxel_coordinates / spacing
    return voxel_coordinates
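A hedged usage sketch of the conversion above; the origin and spacing values are invented for illustration:

import numpy as np

origin = np.array([-195.5, -195.5, -378.0])    # hypothetical scan origin (mm)
spacing = np.array([0.76, 0.76, 2.5])          # hypothetical voxel spacing (mm)
world = np.array([-120.0, -80.0, -300.0])      # hypothetical world coordinate (mm)
print(world_2_voxel(world, origin, spacing))   # fractional voxel indices along each axis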
Project: GraphTime    Author: GlooperLabs
def soft_threshold(X, thresh):
    """Proximal mapping of l1-norm results in soft-thresholding. Therefore, it is required
    for the optimisation of the GFGL or IFGL.

    Parameters
    ----------
    X : ndarray
        input data of arbitrary shape
    thresh : float
        threshold value

    Returns
    -------
    ndarray with soft threshold applied elementwise
    """
    return (np.absolute(X) - thresh).clip(0) * np.sign(X)
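A quick worked example of the soft-thresholding operator defined above (input values chosen arbitrarily):

import numpy as np

X = np.array([-3.0, -0.5, 0.0, 0.4, 2.5])
# every entry is shrunk toward zero by thresh; anything with |x| <= thresh becomes 0
print(soft_threshold(X, 1.0))  # [-2. -0.  0.  0.  1.5]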
Project: deep_srl    Author: luheng
def tensorize(sentence, max_length):
  """ Input:
      - sentence: The sentence is a tuple of lists (s1, s2, ..., sk)
            s1 is always a sequence of word ids.
            sk is always a sequence of label ids.
            s2 ... sk-1 are sequences of feature ids,
              such as predicate or supertag features.
      - max_length: The maximum length of sequences, used for padding.
  """
  x = np.array([t for t in zip(*sentence[:-1])])
  y = np.array(sentence[-1])
  weights = (y >= 0).astype(float)
  x.resize([max_length, x.shape[1]])
  y.resize([max_length])
  weights.resize([max_length])
  return x, np.absolute(y), len(sentence[0]), weights
Project: PortfolioTimeSeriesAnalysis    Author: MizioAnd
def outlier_identification(self, model, x_train, y_train):
        # Split the training data into an extra set of test
        x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
        print('\nOutlier shapes')
        print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
        model.fit(x_train_split, y_train_split)
        y_predicted = model.predict(x_test_split)
        residuals = np.absolute(y_predicted - y_test_split)
        rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
        outliers_mask = residuals >= rmse_pred_vs_actual
        outliers_mask = np.concatenate([np.zeros((np.shape(y_train_split)[0],), dtype=bool), outliers_mask])
        not_an_outlier = outliers_mask == 0
        # Resample the training set from split, since the set was randomly split
        x_out = np.insert(x_train_split, np.shape(x_train_split)[0], x_test_split, axis=0)
        y_out = np.insert(y_train_split, np.shape(y_train_split)[0], y_test_split, axis=0)
        return x_out[not_an_outlier, ], y_out[not_an_outlier, ]
Project: balu-python    Author: dipaco
def edge_LoG(I, sigma):
    LoG = laplace(gaussian(I, sigma=sigma), ksize=3)
    thres = np.absolute(LoG).mean() * 1.0
    output = np.zeros(LoG.shape)
    w = output.shape[1]
    h = output.shape[0]

    for y in range(1, h - 1):
        for x in range(1, w - 1):
            patch = LoG[y - 1:y + 2, x - 1:x + 2]
            p = LoG[y, x]
            maxP = patch.max()
            minP = patch.min()
            if p > 0:
                zeroCross = minP < 0
            else:
                zeroCross = maxP > 0
            if ((maxP - minP) > thres) and zeroCross:
                output[y, x] = 1

    #FIXME: it is necessary to decide whether to return the closing of the output or just the output
    #return binary_closing(output)
    return output
Project: Poccala    Author: Byshx
def fft(frames, nfft=512):
            """
            ???????

                ????????????????????????????????????
            ??????????????????????????????????????
            ??????????????????????????????????????
            ??????????????????????????????????????
            ????

            :param frames:????????
            :param nfft:fft???????
            :return:???nfft//2+1?????????????
            """
            complex_spec = np.fft.rfft(frames, nfft)
            return np.absolute(complex_spec)
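A minimal sketch of the same computation using plain numpy calls on synthetic frames (the shapes are illustrative only):

import numpy as np

frames = np.random.randn(10, 400)             # 10 frames of 400 samples each
spec = np.absolute(np.fft.rfft(frames, 512))  # one-sided magnitude spectrum
print(spec.shape)                             # (10, 257) == (n_frames, nfft//2 + 1)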
Project: SelfDrivingCar    Author: aguijarro
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):

    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    absgraddir = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output


# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
Project: SelfDrivingCar    Author: aguijarro
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Calculate the magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output


# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=5, thresh_max=100
# should produce output like the example image shown above this quiz.
Project: SelfDrivingCar    Author: aguijarro
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):

    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    if orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel = np.absolute(sobel)
    # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # 5) Create a mask of 1's where the scaled gradient magnitude
    #    is > thresh_min and < thresh_max
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # 6) Return this mask as your binary_output image
    return binary_output
Project: radar    Author: amoose136
def test_endian(self):
        msg = "big endian"
        a = np.arange(6, dtype='>i4').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)
        msg = "little endian"
        a = np.arange(6, dtype='<i4').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)

        # Output should always be native-endian
        Ba = np.arange(1, dtype='>f8')
        La = np.arange(1, dtype='<f8')
        assert_equal((Ba+Ba).dtype, np.dtype('f8'))
        assert_equal((Ba+La).dtype, np.dtype('f8'))
        assert_equal((La+Ba).dtype, np.dtype('f8'))
        assert_equal((La+La).dtype, np.dtype('f8'))

        assert_equal(np.absolute(La).dtype, np.dtype('f8'))
        assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
        assert_equal(np.negative(La).dtype, np.dtype('f8'))
        assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
Project: psp    Author: cmap
def build_parser():
    """Build argument parser."""

    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Required args
    parser.add_argument("--in_gct_path", "-i", required=True,
                        help="filepath to input gct")

    # Optional args
    parser.add_argument("--out_name", "-o", default=None,
                        help="name of output file (default is <INPUT_GCT>.tear.processed.gct")
    parser.add_argument("--divide_by_mad", "-dm", action="store_true", default=False,
                    help=("whether to divide by median absolute deviation " +
                          "in addition to subtracting the probe median"))
    parser.add_argument("--ignore_subset_norm", "-ig", action="store_true", default=False,
                        help="whether to ignore subset-specific normalization")
    parser.add_argument("-psp_config_path", type=str,
                        default="~/psp_production.cfg",
                        help="filepath to PSP config file")
    parser.add_argument("-verbose", "-v", action="store_true", default=False,
                        help="increase the number of messages reported")

    return parser
Project: bpy_lambda    Author: bcongdon
def flow(self, Kc, Ks, Kz, Ka, numexpr):
        zeros = np.zeros
        where = np.where
        min = np.minimum
        max = np.maximum
        abs = np.absolute
        arctan = np.arctan
        sin = np.sin

        center = (slice(1, -1, None), slice(1, -1, None))
        rock = self.center
        ds = self.scour[center]
        rcc = rock[center]
        rock[center] = rcc - ds * Kz
        # there isn't really a bottom to the rock but negative values look ugly;
        # rcc is a view of rock[center], so this clamps the freshly updated values
        rock[center] = where(rcc < 0, 0, rcc)
Project: Epileptic-Seizure-Prediction    Author: cedricsimar
def compute_spectrogram(self, sig, data_length_sec, sampling_frequency, nfreq_bands, win_length_sec, stride_sec):

        n_channels = 16
        n_timesteps = int((data_length_sec - win_length_sec) / stride_sec + 1)
        n_fbins = nfreq_bands

        sig = np.transpose(sig)

        sig2 = np.zeros((n_channels, n_fbins, n_timesteps))
        for i in range(n_channels):
            sigc = np.zeros((n_fbins, n_timesteps))
            for frame_num, w in enumerate(range(0, int(data_length_sec - win_length_sec + 1), stride_sec)):

                sigw = sig[i, w * sampling_frequency: (w + win_length_sec) * sampling_frequency]
                sigw = self.hanning(sigw)
                fft = self.log10(np.absolute(np.fft.rfft(sigw)))
                fft_freq = np.fft.rfftfreq(n=sigw.shape[-1], d=1.0 / sampling_frequency)
                sigc[:nfreq_bands, frame_num] = self.group_into_bands(fft, fft_freq, nfreq_bands)

            sig2[i, :, :] = sigc

        return np.transpose(sig2, axes=(2,1,0))
Project: bib-tagger    Author: KateRita
def scrub(cls, image):
        """
        Apply Stroke-Width Transform to image.

        :param image: source image as a numpy array (BGR)
        :return: numpy array representing result of transform
        """

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        canny, sobelx, sobely, theta = cls._create_derivative(gray)
        swt = cls._swt(theta, canny, sobelx, sobely)
        shapes = cls._connect_components(swt)
        swts, heights, widths, topleft_pts, images = cls._find_letters(swt, shapes)
        if len(swts) == 0:
            # didn't find any text, probably a bad face
            return None

        word_images = cls._find_words(swts, heights, widths, topleft_pts, images)

        final_mask = np.zeros(swt.shape)
        for word in word_images:
            final_mask += word
        return final_mask
Project: Dragonfly    Author: duaneloh
def compute_ang_corr(self, input_frame, normed=True, ang_max=10):
        """Compute the angular correlation from the polar representation of given pattern

        Arguments:
            input_frame (array) - Input detector frame; converted to polar internally via compute_polar()
            normed (bool, optional) - Whether to normalize Fourier transform in each radial bin
            ang_max (int, optional) - How many Fourier components to keep

        Returns:
            ang_corr (array) - Angular correlations for each input bin
        """
        polar_arr = self.compute_polar(input_frame)
        ang_corr = np.array([a - a.mean() for a in polar_arr])
        temp = []
        for a in ang_corr:
            if normed:
                la = np.linalg.norm(a)
                if la > 0.:
                    temp.append(np.absolute(np.fft.fft(a/la))[1:ang_max])
                else:
                    temp.append(np.zeros(ang_max-1))
            else:
                temp.append(np.absolute(np.fft.fft(a))[1:ang_max])
        return np.array(temp)
Project: ANN-PONR-Python3    Author: anon-42
def plot_gradients(self, foo=False):
        '''
        Shows the difference between the gradients computed in the ANN module
        and the numerically calculated gradients.
        '''
        fig = plt.gcf()
        fig.canvas.set_window_title('Comparison of the computed gradients')
        numgrad, grad, qua, ok = ngc.compare_gradients(self.Net, 
                                                       self.inputdata_tr, 
                                                       self.outputdata_tr)
        print(qua, ok)
        y = numgrad-grad
        y2 = np.absolute(y)   
        plt.bar(np.arange(1,len(y)+1), y)
        plt.grid(1)
        plt.xlabel('Gradient')
        plt.ylabel('Difference')
        plt.show()

        if foo:
            print('numgrad: ', numgrad)
            print('grad: ', grad)
        print('difference: ', y)
Project: Least-Squared-Error-Based-FIR-Filters    Author: fourier-being
def lpfls(N,wp,ws,W):
    M = (N - 1)//2  # N assumed odd; integer division keeps M usable as an index in Python 3
    nq = np.arange(0,2*M+1)
    nb = np.arange(0,M+1)
    q = (wp/np.pi)*np.sinc((wp/np.pi)*nq) - W*(ws/np.pi)*np.sinc((ws/np.pi)*nq)
    b = (wp/np.pi)*np.sinc((wp/np.pi)*nb)
    b[0] = wp/np.pi
    q[0] = wp/np.pi + W*(1-ws/np.pi) # since sin(pi*n)/pi*n = 1, not 0
    b = b.transpose()

    Q1 = ln.toeplitz(q[0:M+1])
    Q2 = ln.hankel(q[0:M+1],q[M:])
    Q = Q1+Q2

    a = ln.solve(Q,b)
    h = list(nq)
    for i in nb:
        h[i] = 0.5*a[M-i]
        h[N-1-i] = h[i]
    h[M] = 2*h[M]
    hmax = max(np.absolute(h))
    for i in nq:
        h[i] = (8191/hmax)*h[i]
    return h
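A hedged usage sketch of the lowpass design above; the band edges and weight are arbitrary, and `ln` is assumed to be scipy.linalg as in the snippet:

import numpy as np
import scipy.linalg as ln

# 31-tap least-squares lowpass FIR: passband edge 0.3*pi, stopband edge 0.5*pi
h = lpfls(31, 0.3 * np.pi, 0.5 * np.pi, 1.0)
print(len(h), max(np.absolute(h)))  # 31 taps, peak tap magnitude scaled to 8191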
Project: Least-Squared-Error-Based-FIR-Filters    Author: fourier-being
def bpfls(N,ws1,wp1,wp2,ws2,W):
    M = (N - 1)//2  # N assumed odd; integer division keeps M usable as an index in Python 3
    nq = np.arange(0,2*M+1)
    nb = np.arange(0,M+1)
    q = W*np.sinc(nq) - (W*ws2/np.pi) * np.sinc(nq* (ws2/np.pi)) + (wp2/np.pi) * np.sinc(nq*(wp2/np.pi)) - (wp1/np.pi) * np.sinc(nq*(wp1/np.pi)) + (W*ws1/np.pi) * np.sinc(nq*(ws1/np.pi))
    b = (wp2/np.pi)*np.sinc((wp2/np.pi)*nb) - (wp1/np.pi)*np.sinc((wp1/np.pi)*nb)
    b[0] = wp2/np.pi - wp1/np.pi
    q[0] = W - W*ws2/np.pi + wp2/np.pi - wp1/np.pi + W*ws1/np.pi # since sin(pi*n)/pi*n = 1, not 0
    b = b.transpose()

    Q1 = ln.toeplitz(q[0:M+1])
    Q2 = ln.hankel(q[0:M+1],q[M:])
    Q = Q1+Q2

    a = ln.solve(Q,b)
    h = list(nq)
    for i in nb:
        h[i] = 0.5*a[M-i]
        h[N-1-i] = h[i]
    h[M] = 2*h[M]
    hmax = max(np.absolute(h))
    for i in nq:
        h[i] = (8191/hmax)*h[i]
    return h
Project: Least-Squared-Error-Based-FIR-Filters    Author: fourier-being
def hpfls(N,ws,wp,W):
    M = (N - 1)//2  # N assumed odd; integer division keeps M usable as an index in Python 3
    nq = np.arange(0,2*M+1)
    nb = np.arange(0,M+1)
    b = 1 - (wp/np.pi)* np.sinc(nb * wp/np.pi)
    b[0] = 1- wp/np.pi
    q = 1 - (wp/np.pi)* np.sinc(nq * wp/np.pi) + W * (ws/np.pi) * np.sinc(nq * ws/np.pi) # since sin(pi*n)/pi*n = 1, not 0
    q[0] = b[0] + W* ws/np.pi
    b = b.transpose()

    Q1 = ln.toeplitz(q[0:M+1])
    Q2 = ln.hankel(q[0:M+1],q[M:])
    Q = Q1+Q2

    a = ln.solve(Q,b)
    h = list(nq)
    for i in nb:
        h[i] = 0.5*a[M-i]
        h[N-1-i] = h[i]
    h[M] = 2*h[M]
    hmax = max(np.absolute(h))
    for i in nq:
        h[i] = (8191/hmax)*h[i]
    return h
Project: DataMining    Author: lidalei
def bias_var(true_preds, sum_preds, counts, n_replicas):
    '''
    compute bias and variance
    @param true_preds: true labels
    @param sum_preds: array of summation of the predictions of each sample
    @param counts: the times each sample is tested (predicted)
    @return: squared bias, variance
    '''
    sample_bias = np.absolute(true_preds - sum_preds / counts)
    sample_var = sample_bias * (1.0 - sample_bias)

    weighted_sample_bias_2 = np.power(sample_bias, 2.0) * (counts / n_replicas)
    weighted_sample_var = sample_var * (counts / n_replicas)
    bias = np.mean(weighted_sample_bias_2)
    var = np.mean(weighted_sample_var)

    return bias, var
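A toy invocation with invented numbers, to make the weighting concrete:

import numpy as np

true_preds = np.array([1.0, 0.0, 1.0])
sum_preds = np.array([8.0, 3.0, 10.0])  # summed 0/1 predictions over the replicas
counts = np.array([10.0, 10.0, 10.0])   # each sample predicted 10 times
bias, var = bias_var(true_preds, sum_preds, counts, n_replicas=10)
print(bias, var)  # ~0.0433 and ~0.1233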
Project: Tweezer_design    Author: AntoineRiaud
def Mesh_Theta(target_size,omega,mu_0,s_Ray,N_turns,l,electrodes_angle):
    freq = omega/(2*numpy.pi)
    lambda_approx = 1/(freq*numpy.mean(s_Ray))
    R0 = target_size + omega*numpy.mean(mu_0)*lambda_approx/(2*numpy.pi)
    if l==0:
        Theta = []
        phi_0 = 2*numpy.pi*R0/lambda_approx
        for n in range(N_turns):
            Theta_max = 2*numpy.pi
            R_approx = R0+n*lambda_approx
            dTheta = lambda_approx/(6*R_approx)
            Theta.append(numpy.arange(electrodes_angle[0],Theta_max+electrodes_angle[0],dTheta))
    else:
        Theta_max = 2*numpy.pi*numpy.ceil(N_turns/numpy.absolute(l))
        Theta = [electrodes_angle[0]]
        while Theta[-1]<(electrodes_angle[0]+Theta_max):
            R_approx = R0+numpy.abs(l)*Theta[-1]*lambda_approx/(2*numpy.pi)
            dTheta = lambda_approx/(6*R_approx)
            Theta.append(Theta[-1]+dTheta)
        if l<0:
            Theta = Theta[::-1]
            phi_0 = numpy.abs(l)*(Theta[0]+2*numpy.pi*R0/lambda_approx)
        else:
            phi_0 = (2*numpy.pi*R0/lambda_approx)
    return {'Theta':Theta,'phi_0':phi_0}
Project: XYalign    Author: WilsonSayresLab
def transform_depth(numpy_array):
    """
    Performs custom version of log transformation on a numpy array. Where each
    value is processed to be equal to:
    initial_sign * abs(log10(abs(value)))

    Parameters
    ----------
    numpy_array : numpy array
        Array of values without NaNs

    Returns
    -------
    numpy array
    """
    signs = np.sign(numpy_array)
    step1 = np.absolute(numpy_array)
    id_nonzero = step1 != 0
    # pass `out` so entries skipped by `where` are defined zeros, not uninitialized memory
    step2 = np.absolute(np.log10(step1, out=np.zeros_like(step1), where=id_nonzero))
    return signs * step2
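A quick sanity check of the signed-log transform (input values are arbitrary):

import numpy as np

arr = np.array([-1000.0, -1.0, 0.0, 0.5, 100.0])
print(transform_depth(arr))  # [-3. -0.  0.  0.30103  2.] -- i.e. sign * |log10(|value|)|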
Project: MDT    Author: cbclab
def __call__(self):
        locations = LinearLocator.__call__(self)

        new_locations = []
        for location in locations:
            if np.absolute(location) < 0.01:
                new_locations.append(float("{:.1e}".format(location)))
            else:
                new_locations.append(np.round(location, 3))

        if np.isclose(new_locations[-1], self.max_val) or new_locations[-1] >= self.max_val:
            new_locations[-1] = self.max_val

        if new_locations[0] <= self.min_val:
            new_locations[0] = self.min_val

        return new_locations
Project: cuvarbase    Author: johnh2o2
def test_slow_gridding_against_jvdp_nfft(self):
        t, tsc, y, err = data()

        nf = int(nfft_sigma * len(t))
        gpu_grid = simple_gpu_nfft(t, y, nf, sigma=nfft_sigma, m=nfft_m,
                                   just_return_gridded_data=True,
                                   fast_grid=False,
                                   minimum_frequency=-int(nf/2),
                                   samples_per_peak=spp)

        # get CPU grid
        cpu_grid = get_cpu_grid(tsc, y, nf, sigma=nfft_sigma, m=nfft_m)

        diffs = np.absolute(gpu_grid - cpu_grid)
        inds = (np.argsort(diffs)[::-1])[:10]

        for i, gpug, cpug, d in zip(inds, gpu_grid[inds],
                                    cpu_grid[inds],
                                    diffs[inds]):
            print(i, gpug, cpug, d)

        tols = dict(rtol=nfft_rtol, atol=nfft_atol)
        assert_allclose(gpu_grid, cpu_grid, **tols)
Project: cuvarbase    Author: johnh2o2
def test_large_run(self, make_plot=False, **kwargs):
        proc = ConditionalEntropyAsyncProcess(**kwargs)
        t, y, dy = data(sigma=0.01, ndata=100, freq=4.)
        df = 0.001
        max_freq = 100.
        min_freq = df
        nf = int((max_freq - min_freq) / df)
        freqs = min_freq + df * np.arange(nf)

        r0 = proc.run([(t, y, dy)], freqs=freqs)
        r1 = proc.large_run([(t, y, dy)], freqs=freqs, max_memory=1e7)

        f0, p0 = r0[0]
        f1, p1 = r1[0]

        rel_err = max(np.absolute(p0 - p1)) / np.median(np.absolute(p0))
        print(max(np.absolute(p0 - p1)), rel_err)
        assert_allclose(p0, p1, rtol=1e-4, atol=1e-2)
Project: EndemicPy    Author: j-i-l
def quasistable(self, quasi_stable_strain_ids=None, surviving_strain_ids=None):
        """
        Stability check.
        If stable return True, else return False
        """
        if quasi_stable_strain_ids is not None:
            i_1 = int(self.t / 3.)
            i_2 = 2 * i_1
            max_diff = n_max(absolute(
                divide(
                    n_sum(self._counts_over_time[i_1:i_2], axis=0), n_sum(self._counts_over_time[i_2:], axis=0)
                )[quasi_stable_strain_ids]
            ))
            if abs(1 - max_diff) >= 0.02:
                return False
            else:
                print('quasistable at t =', self.t)
                return True
        if surviving_strain_ids is not None:
            if not count_nonzero(self._counts_over_time[int(self.t)][surviving_strain_ids]):
                print('protected strain died out at t =', self.t)
                return True
            else:
                return False
        return False
Project: Tesis-UIP    Author: ajlongart
def maxImagen(img, tamanyo):
    '''Estimate background intensities for the blue and green channels of img.'''
    bOri, gOri, rOri = cv2.split(img)
    filas,columnas,canales = img.shape
    #pad_size = tamanyo/2
    #padded_max = np.pad(img, (pad_size, pad_size),'constant',constant_values=np.inf)
    max_channel = np.zeros((filas,columnas))
    for r in range(1,filas):
        for c in range(1,columnas):
            window_b = bOri[r:r+tamanyo,c:c+tamanyo]
            window_g = gOri[r:r+tamanyo,c:c+tamanyo]
            window_r = rOri[r:r+tamanyo,c:c+tamanyo]
            max_bg = np.max(window_b+window_g)
            max_r = np.max(window_r)
            max_ch = max_r-max_bg       #(max_r-max_bg)+np.absolute(np.min(max_r-max_bg))
            max_ch_array = np.array([max_ch])
            max_channel[r,c] = max_ch_array

    min_max_channel = np.min(max_channel)
    background_bOri = np.mean(bOri*min_max_channel)
    background_gOri = np.mean(gOri*min_max_channel)
    BbOri = np.absolute(background_bOri)
    BgOri = np.absolute(background_gOri)

    return BbOri, BgOri     #max_channel,
Project: Tesis-UIP    Author: ajlongart
def maxImagen(img, tamMax):
    '''Estimate background intensities for the blue and green channels of img.'''
    bOri, gOri, rOri = cv2.split(img)
    filas,columnas,canales = img.shape

    max_channel = np.zeros((filas,columnas))
    for r in range(1,filas):
        for c in range(1,columnas):
            window_b = bOri[r:r+tamMax,c:c+tamMax]
            window_g = gOri[r:r+tamMax,c:c+tamMax]
            window_r = rOri[r:r+tamMax,c:c+tamMax]
            max_bg = np.max(window_b+window_g)
            max_r = np.max(window_r)
            max_ch = max_r-max_bg       #(max_r-max_bg)+np.absolute(np.min(max_r-max_bg))
            max_ch_array = np.array([max_ch])

            max_channel[r,c] = max_ch_array

    min_max_channel = np.min(max_channel)
    background_bOri = np.mean(bOri*min_max_channel)
    background_gOri = np.mean(gOri*min_max_channel)
    BbOri = np.absolute(background_bOri)
    BgOri = np.absolute(background_gOri)

    return BbOri, BgOri     #max_channel,
Project: krpcScripts    Author: jwvanderbeck
def test_endian(self):
        msg = "big endian"
        a = np.arange(6, dtype='>i4').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)
        msg = "little endian"
        a = np.arange(6, dtype='<i4').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)

        # Output should always be native-endian
        Ba = np.arange(1, dtype='>f8')
        La = np.arange(1, dtype='<f8')
        assert_equal((Ba+Ba).dtype, np.dtype('f8'))
        assert_equal((Ba+La).dtype, np.dtype('f8'))
        assert_equal((La+Ba).dtype, np.dtype('f8'))
        assert_equal((La+La).dtype, np.dtype('f8'))

        assert_equal(np.absolute(La).dtype, np.dtype('f8'))
        assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
        assert_equal(np.negative(La).dtype, np.dtype('f8'))
        assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
Project: GY-91_and_PiCamera_RaspberryPi    Author: mikechan0731
def threshold_test(self):
        mx_adj, my_adj, mz_adj = self.mag_adj()
        m_normal = np.sqrt(np.square(mx_adj)+np.square(my_adj)+np.square(mz_adj))

        heading = np.degrees(np.arctan2(mx_adj/m_normal, my_adj/m_normal))

        heading_diff = np.diff(heading)

        rotate_index =  np.insert(np.where(np.absolute(heading_diff)>20.0), 0, 0)

        plt.plot(heading_diff)
        plt.show()

        angle_lst = []
        for i in range(rotate_index.size):
            try:
                angle_onestep = np.mean(heading[rotate_index[i]: rotate_index[i+1]])
                angle_lst.append(angle_onestep)
            except IndexError:
                # rotate_index[i+1] does not exist for the final segment
                pass

        print(angle_lst)
Project: GY-91_and_PiCamera_RaspberryPi    Author: mikechan0731
def fft_test2(self):
        axis = str(self.axis_combobox.currentText())

        if axis.startswith('a'):
            normal_para = 16384.0
        elif axis.startswith('g'):
            normal_para = 131.0
        signal =( self.raw_data[axis] - self.bias_dict[axis])/ normal_para

        n = signal.size # Number of data points
        dx = 0.007 # Sampling period (in meters)
        Fk = np.fft.fft(signal) # Fourier coefficients (unnormalized)
        nu = np.fft.fftfreq(n,dx) # Natural frequencies
        #Fk = np.fft.fftshift(Fk) # Shift zero freq to center
        #nu = np.fft.fftshift(nu) # Shift zero freq to center
        f, ax = plt.subplots(3,1,sharex=True)
        ax[0].plot(nu, np.real(Fk)) # Plot Cosine terms
        ax[0].set_ylabel(r'$Re[F_k]$', size = 'x-large')
        ax[1].plot(nu, np.imag(Fk)) # Plot Sine terms
        ax[1].set_ylabel(r'$Im[F_k]$', size = 'x-large')
        ax[2].plot(nu, np.absolute(Fk)**2) # Plot spectral power
        ax[2].set_ylabel(r'$\vert F_k \vert ^2$', size = 'x-large')
        ax[2].set_xlabel(r'$\widetilde{\nu}$', size = 'x-large')
        plt.title(axis)
        plt.show()
Project: inception-face-shape-classifier    Author: adonistio
def q(landmarks, index1, index2):
# get the angle between landmarks index1 and index2

    x1 = landmarks[int(index1)][0]
    y1 = landmarks[int(index1)][1]
    x2 = landmarks[int(index2)][0]
    y2 = landmarks[int(index2)][1]

    x_diff = float(x1 - x2)

    if (y1 == y2): y_diff = 0.1
    if (y1 < y2): y_diff = float(np.absolute(y1 - y2))
    if (y1 > y2): 
        y_diff = 0.1
        print("Error: Facial feature located below chin.")

    return np.absolute(math.atan(x_diff/y_diff))


#image_dir should contain sub-folders containing the images where features need to be extracted
#only one face should be present in each image
#if multiple faces are detected by OpenCV, image must be manually edited; the parameters of the face-detection routine can also be changed
Project: pyssp    Author: shunsukeaihara
def compute_by_noise_pow(self, signal, n_pow):
        s_spec = np.fft.fft(signal * self._window)
        s_amp = np.absolute(s_spec)
        s_phase = np.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        xi = self._calc_apriori_snr(gamma)
        self._prevGamma = gamma
        nu = gamma * xi / (1.0 + xi)
        self._G = (self._gamma15 * np.sqrt(nu) / gamma) * np.exp(-nu / 2.0) *\
                  ((1.0 + nu) * spc.i0(nu / 2.0) + nu * spc.i1(nu / 2.0))
        idx = np.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = self._constant
        self._G = np.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = np.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * np.exp(s_phase * 1j)
        return np.real(np.fft.ifft(spec))
Project: pyssp    Author: shunsukeaihara
def compute_by_noise_pow(self, signal, n_pow):
        s_spec = np.fft.fft(signal * self._window)
        s_amp = np.absolute(s_spec)
        s_phase = np.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        xi = self._calc_apriori_snr(gamma)
        # xi = self._calc_apriori_snr2(gamma,n_pow)
        self._prevGamma = gamma
        nu = gamma * xi / (1.0 + xi)
        self._G = xi / (1.0 + xi) * np.exp(0.5 * spc.exp1(nu))
        idx = np.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = self._constant
        self._G = np.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = np.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * np.exp(s_phase * 1j)
        return np.real(np.fft.ifft(spec))
Project: pyssp    Author: shunsukeaihara
def compute_by_noise_pow(self, signal, n_pow):
        s_spec = np.fft.fft(signal * self._window)
        s_amp = np.absolute(s_spec)
        s_phase = np.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        # xi = self._calc_apriori_snr2(gamma,n_pow)
        xi = self._calc_apriori_snr(gamma)
        self._prevGamma = gamma
        u = 0.5 - self._mu / (4.0 * np.sqrt(gamma * xi))
        self._G = u + np.sqrt(u ** 2.0 + self._tau / (gamma * 2.0))
        idx = np.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = np.isnan(self._G) + np.isinf(self._G)
        self._G[idx] = self._constant
        self._G = np.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = np.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * np.exp(s_phase * 1j)
        return np.real(np.fft.ifft(spec))
Project: covar_me_app    Author: CovarMe
def read_mongodb_matrix(tickers, matrix_name):
    mis = MatrixItem.objects(i__in = tickers,
                             j__in = tickers,
                             matrix_name = matrix_name)
    n = len(tickers)
    available_tickers = set([mi.i for mi in mis])
    np.random.seed(n)
    a = np.absolute(np.random.normal(0, 0.001, [n, n]))
    a_triu = np.triu(a, k=0)
    a_tril = np.tril(a, k=0)
    a_diag = np.diag(np.diag(a))
    a_sym_triu = a_triu + a_triu.T - a_diag
    matrix = pd.DataFrame(a_sym_triu,
                          index = tickers,
                          columns = tickers)
    for mi in mis:
        if abs(mi.v) > 10:
            mi.v = 0.001

        matrix.at[mi.i, mi.j] = mi.v
        matrix.at[mi.j, mi.i] = mi.v

    matrix = matrix.round(6)
    return matrix
Project: HousePrices    Author: MizioAnd
def outlier_identification(self, model, x_train, y_train):
        # Split the training data into an extra set of test
        x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
        print('\nOutlier shapes')
        print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
        model.fit(x_train_split, y_train_split)
        y_predicted = model.predict(x_test_split)
        residuals = np.absolute(y_predicted - y_test_split)
        rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
        outliers_mask = residuals >= rmse_pred_vs_actual
        # outliers_mask = np.insert(np.zeros((np.shape(y_train_split)[0],), dtype=np.int), np.shape(y_train_split)[0],
        #                           outliers_mask)
        outliers_mask = np.concatenate([np.zeros((np.shape(y_train_split)[0],), dtype=bool), outliers_mask])
        not_an_outlier = outliers_mask == 0
        # Resample the training set from split, since the set was randomly split
        x_out = np.insert(x_train_split, np.shape(x_train_split)[0], x_test_split, axis=0)
        y_out = np.insert(y_train_split, np.shape(y_train_split)[0], y_test_split, axis=0)
        return x_out[not_an_outlier, ], y_out[not_an_outlier, ]
Project: APEX    Author: ymollard
def wait_for_human_interaction(self, arm_threshold=1, joystick_threshold=0.15):
        rospy.loginfo("We are waiting for human interaction...")

        def detect_arm_variation():
            new_effort = np.array(self.topics.torso_l_j.effort)
            delta = np.absolute(effort - new_effort)
            return np.amax(delta) > arm_threshold

        def detect_joy_variation():
            return np.amax(np.abs(self.topics.joy1.axes)) > joystick_threshold

        effort = np.array(self.topics.torso_l_j.effort)
        rate = rospy.Rate(50)
        is_joystick_demo = None
        while not rospy.is_shutdown():
            if detect_arm_variation():
                is_joystick_demo = False
                break
            elif detect_joy_variation():
                is_joystick_demo = True
                break
            rate.sleep()
        return is_joystick_demo

    ################################# Service callbacks
Project: zignal    Author: ronnyandersson
def peak(self):
        """Calculate peak sample value (with sign)"""

        if len(self.samples) != 0:
            if np.issubdtype(self.samples.dtype, np.floating):
                idx = np.absolute(self.samples).argmax(axis=0)
            else:
                # We have to be careful when checking two's complement since the absolute value
                # of the smallest possible value can't be represented without overflowing. For
                # example: signed 16bit has range [-32768, 32767] so abs(-32768) cannot be
                # represented in signed 16 bits --> use a bigger datatype
                bigger  = np.asarray(self.samples, dtype=np.int64)
                idx     = np.absolute(bigger).argmax(axis=0)

            peak = np.array([self.samples[row,col] for col, row in enumerate(idx)])
        else:
            # no samples are set but channels are configured
            idx  = np.zeros(self.ch, dtype=np.int64)
            peak = np.zeros(self.ch)
            peak[:] = float('nan')

        return peak, idx
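The two's-complement caveat in the comments above is easy to demonstrate on its own (a standalone snippet, independent of the class):

import numpy as np

x = np.array([-32768], dtype=np.int16)
print(np.absolute(x))                   # [-32768] -- abs(-32768) overflows int16 and wraps around
print(np.absolute(x.astype(np.int64)))  # [32768]  -- correct after widening the dtype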
Project: zignal    Author: ronnyandersson
def normalise(self):
        """Normalise samples so that the new range is
        [-1.0,  1.0] for floats

        Converts **IN PLACE**

        TODO: verify
        [-2^n, 2^n-1] for ints
        """
        peaks, unused_idx = self.peak()
        self._logger.debug("raw peaks: %s" %peaks)

        max_abs = np.max(np.absolute(peaks))
        self._logger.debug("max_abs: %s" %max_abs)

        self.samples = self.samples/max_abs

        peaks, unused_idx = self.peak()
        self._logger.debug("new peaks: %s" %peaks)

#===================================================================================================
# Audio sub-classes
#===================================================================================================
Project: jrm_ssl    Author: Fhrozen
def single_spectrogram(inseq,fs,wlen,h,imag=False):
    """
        imag: Return Imaginary Data of the STFT on True 
    """
    NFFT = int(2**(np.ceil(np.log2(wlen)))) 
    K = np.sum(hamming(wlen, False))/wlen
    raw_data = inseq.astype('float32')
    raw_data = raw_data/np.amax(np.absolute(raw_data))
    stft_data,_,_ = STFT(raw_data,wlen,h,NFFT,fs)
    s = np.absolute(stft_data)/wlen/K
    if np.fmod(NFFT, 2):
        s[1:, :] *= 2
    else:
        s[1:-2] *= 2
    real_data = np.transpose(20*np.log10(s + 10**-6)).astype(np.float32)
    if imag:
        imag_data = np.angle(stft_data).astype(np.float32)
        return real_data,imag_data 
    return real_data
Project: Vision-based-parking-lot-availability-OpenCV    Author: Saar1312
def getEdges(gray,detector,min_thr=None,max_thr=None):
    """
        Where detector in {1,2,3,4}
        1: Laplacian
        2: Sobelx
        3: Sobely
        4: Canny
        5: Sobelx with positive and negative slope (in 2, negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray,cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=-1)
    elif detector == 4:
        return cv2.Canny(gray,min_thr,max_thr)  # Canny(min_thresh,max_thresh) (threshold not to the intensity but to the
                                                # intensity gradient -value that measures how different is a pixel to its neighbors-)
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
Project: YellowFin_Pytorch    Author: JianGoForIt
def tune_everything(x0squared, C, T, gmin, gmax):
  # First tune based on dynamic range    
  if C == 0:
    dr = gmax / gmin
    mustar = ((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2
    alpha_star = (1 + np.sqrt(mustar))**2 / gmax

    return alpha_star, mustar

  dist_to_opt = x0squared
  grad_var = C
  max_curv = gmax
  min_curv = gmin
  const_fact = dist_to_opt * min_curv**2 / 2 / grad_var
  coef = [-1, 3, -(3 + const_fact), 1]
  roots = np.roots(coef)
  roots = roots[np.real(roots) > 0]
  roots = roots[np.real(roots) < 1]
  root = roots[np.argmin(np.imag(roots))]

  assert root > 0 and root < 1 and np.absolute(root.imag) < 1e-6

  dr = max_curv / min_curv
  assert max_curv >= min_curv
  mu = max(((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2, root**2)

  lr_min = (1 - np.sqrt(mu))**2 / min_curv
  lr_max = (1 + np.sqrt(mu))**2 / max_curv

  alpha_star = lr_min
  mustar = mu

  return alpha_star, mustar
Project: kaggle_dsb2017    Author: astoc
def world_2_voxel(world_coordinates, origin, spacing):
    stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
    voxel_coordinates = stretched_voxel_coordinates / spacing
    return voxel_coordinates
Project: npstreams    Author: LaurentRDC
def test_non_binary_ufunc(self):
        """ Test that ireduce_ufunc raises ValueError if non-binary ufunc is used """
        with self.assertRaises(ValueError):
            ireduce_ufunc(range(10), ufunc = np.absolute)
Project: cloud-volume    Author: seung-lab
def slices_from_global_coords(self, slices):
    """
    Used for converting from mip 0 coordinates to upper mip level
    coordinates. This is mainly useful for debugging since the neuroglancer
    client displays the mip 0 coordinates for your cursor.
    """

    maxsize = list(self.mip_volume_size(0)) + [ self.num_channels ]
    minsize = list(self.mip_voxel_offset(0)) + [ 0 ]

    slices = generate_slices(slices, minsize, maxsize)[:3]
    lower = Vec(*map(lambda x: x.start, slices))
    upper = Vec(*map(lambda x: x.stop, slices))
    step = Vec(*map(lambda x: x.step, slices))

    lower /= self.downsample_ratio
    upper /= self.downsample_ratio

    signs = step / np.absolute(step)
    step = signs * max2(np.absolute(step / self.downsample_ratio), Vec(1,1,1))
    step = Vec(*np.round(step))

    return [
      slice(lower.x, upper.x, step.x),
      slice(lower.y, upper.y, step.y),
      slice(lower.z, upper.z, step.z)
    ]
Project: lung-cancer-detector    Author: YichenGong
def worldToVoxelCoord(worldCoord, origin, spacing):
  stretchedVoxelCoord = np.absolute(worldCoord - origin)
  voxelCoord = stretchedVoxelCoord / spacing
  return voxelCoord
Project: lung-cancer-detector    Author: YichenGong
def world_to_voxel_coord(worldCoord, origin, spacing):
    stretchedVoxelCoord = np.absolute(worldCoord - origin)
    voxelCoord = stretchedVoxelCoord / spacing

    return voxelCoord
Project: dsb3    Author: EliasVansteenkiste
def world2voxel(world_coord, origin, spacing):
    stretched_voxel_coord = np.absolute(world_coord - origin)
    voxel_coord = stretched_voxel_coord / spacing
    return voxel_coord