Python numpy module: polyfit() code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.polyfit().

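Before diving into the project examples, here is a minimal sketch of the basic call (illustrative data): numpy.polyfit returns the polynomial coefficients, highest power first, and numpy.poly1d wraps them for evaluation.

import numpy as np

x = np.linspace(0, 10, 50)
y = 3.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)
slope, intercept = np.polyfit(x, y, 1)  # degree-1 fit returns [slope, intercept]
line = np.poly1d([slope, intercept])    # convenience wrapper for evaluating the fit
print(slope, intercept, line(5.0))      # slope ~3.0, intercept ~1.0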
Project: pwtools | Author: elcorto
def fit(self):
        # Quadratic fit to get an initial guess for the parameters. 
        # Thanks: https://github.com/materialsproject/pymatgen
        # -> pymatgen/io/abinitio/eos.py 
        a, b, c = np.polyfit(self.volume, self.energy, 2)
        v0 = -b/(2*a)
        e0 = a*v0**2 + b*v0 + c
        b0 = 2*a*v0
        b1 = 4  # b1 is usually a small number like 4
        if not (self.volume.min() < v0 < self.volume.max()):
            raise ValueError('The minimum volume of the fitted parabola is not within the input volumes')

        # need to use lst2dct and dct2lst here to keep the order of parameters 
        pp0_dct = dict(e0=e0, b0=b0, b1=b1, v0=v0)
        target = lambda pp, v: self.energy - self.func(v, self.func.lst2dct(pp))
        pp_opt, ierr = leastsq(target, 
                               self.func.dct2lst(pp0_dct), 
                               args=(self.volume,))
        self.params = self.func.lst2dct(pp_opt)
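The quadratic initial guess above is just the vertex of the fitted parabola; a self-contained check with synthetic E(V) data (illustrative values, not taken from pwtools):

import numpy as np

volume = np.linspace(15.0, 25.0, 11)
energy = 0.02 * (volume - 20.0)**2 - 5.0  # synthetic E(V) with a minimum at V = 20
a, b, c = np.polyfit(volume, energy, 2)
v0 = -b / (2 * a)              # vertex of the parabola, where dE/dV = 0
e0 = a * v0**2 + b * v0 + c    # energy at the minimum
b0 = 2 * a * v0                # V * d2E/dV2 evaluated at v0
print(v0, e0, b0)              # ~20.0, ~-5.0, ~0.8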
Project: saapy | Author: ashapochka
def draw_bs_pairs_linreg(x, y, size=1):
    """Perform pairs bootstrap for linear regression."""

    # Set up array of indices to sample from: inds
    inds = np.arange(len(x))

    # Initialize replicates: bs_slope_reps, bs_intercept_reps
    bs_slope_reps = np.empty(size)
    bs_intercept_reps = np.empty(size)

    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, size=len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        # noinspection PyTupleAssignmentBalance
        bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x, bs_y, 1)

    return bs_slope_reps, bs_intercept_reps
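A usage sketch for the bootstrap helper above (synthetic data; assumes numpy is imported as np):

import numpy as np

x = np.arange(50, dtype=float)
y = 2.0 * x + np.random.normal(size=50)
slopes, intercepts = draw_bs_pairs_linreg(x, y, size=1000)
print(np.percentile(slopes, [2.5, 97.5]))  # bootstrap 95% CI for the slope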
Project: tdx_formula | Author: woodylee1974
def SLOPE(self, param):
        class Context:
            def __init__(self, N):
                self.N = N
                self.q = deque([], self.N)
                self.x = [i for i in range(self.N)]

            def handleInput(self, value):
                if len(self.q) < self.N:
                    self.q.append(value)
                    return 0
                self.q.append(value)
                z1 = np.polyfit(self.x, self.q, 1)
                return z1[0]
        ctx = Context(param[1])
        result = param[0].apply(ctx.handleInput)
        return result
Project: tdx_formula | Author: woodylee1974
def FORCAST(self, param):
        class Context:
            def __init__(self, N):
                self.N = N
                self.q = deque([], self.N)
                self.x = [i for i in range(self.N)]

            def handleInput(self, value):
                if len(self.q) < self.N:
                    self.q.append(value)
                    return np.nan
                z1 = np.polyfit(self.x, self.q, 1)
                fn = np.poly1d(z1)
                y = fn(self.N + 1)
                self.q.append(value)
                return y

        ctx = Context(param[1])
        result = param[0].apply(ctx.handleInput)
        return result

Project: PyGPS | Author: gregstarr
def minScalErr(stec,el,z,thisBias):
    """
    this determines the slope of the vTEC vs. Elevation line, which
    should be minimized in the minimum scalloping technique for
    receiver bias removal
    inputs:
        stec - time indexed Series of slant TEC values
        el - corresponding elevation values, also Series
        z - mapping function values to convert to vTEC from entire file, may
            contain nans, Series
        thisBias - the bias to be tested and minimized
    """

    intel=np.asarray(el[stec.index],int) # bin the elevation values into int
    sTEC=np.asarray(stec,float)
    zmap = z[stec.index]
    c=np.array([(i,np.average((sTEC[intel==i]-thisBias)
                              /zmap[intel==i])) for i in np.unique(intel) if i>30])

    return np.polyfit(c[:,0],c[:,1],1)[0]
Project: dc_stat_think | Author: justinbois
def draw_bs_pairs_linreg(x, y, size=1):
    """Perform pairs bootstrap for linear regression."""
    # Set up array of indices to sample from: inds
    inds = np.arange(len(x))

    # Initialize replicates: bs_slope_reps, bs_intercept_reps
    bs_slope_reps = np.empty(size)
    bs_intercept_reps = np.empty(size)

    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, size=len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x, bs_y, 1)

    return bs_slope_reps, bs_intercept_reps
Project: Python-Scripts-Repo-on-Data-Science | Author: qalhata
def draw_bs_pairs_linreg(x, y, size=1):
    """Perform pairs bootstrap for linear regression."""

    # Set up array of indices to sample from: inds
    inds = np.arange(len(x))

    # Initialize replicates: bs_slope_reps, bs_intercept_reps
    bs_slope_reps = np.empty(size)
    bs_intercept_reps = np.empty(size)

    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, size=len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x, bs_y, 1)

    return bs_slope_reps, bs_intercept_reps
Project: Python-Scripts-Repo-on-Data-Science | Author: qalhata
def draw_bs_pairs(x, y, func, size=1):
    """Perform pairs bootstrap for linear regression."""

    # Set up array of indices to sample from: inds
    inds = np.arange(len(x))
    # Initialize replicates
    bs_replicates = np.empty(size)

    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        bs_replicates[i] = func(bs_x, bs_y)
        # bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x, bs_y, 1)

    return bs_replicates
Project: radar | Author: amoose136
def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)
Project: nexrad_basemap | Author: rskschrom
def calculateKDP(phiDP, ran):
    # smooth phiDP field and take derivative
    numRan = ran.shape[0]
    kdp = np.ma.masked_all(phiDP.shape)
    smoothPhiDP = smPhiDP(phiDP, ran)

    # take the range derivative of the smoothed phiDP field to get kdp
    winLen = 11
    rprof = ran[0:winLen*2-1]/1000.

    for i in range(numRan-winLen*3):
        numvalid = smoothPhiDP[:,i:i+winLen*2-1].count(axis=1)
        max_numv = np.max(numvalid)
        if max_numv==(winLen*2-1):
            kdp[numvalid==(winLen*2-1),i+winLen] = 0.5*np.polyfit(rprof,
                smoothPhiDP[numvalid==(winLen*2-1),i:i+winLen*2-1].transpose(), 1)[0]
    return kdp

# function for creating color map
#----------------------------------
Project: nanopores | Author: mitschabaude
def hplot(name, h, err, xlab="", ylab="", hstr="h", rate=None, fit=True, fig=True):
    from matplotlib.pyplot import figure, loglog, xlabel, ylabel, legend, show
    if fig is True:
        figure()
    loglog(h, err, 's-', label=name)
    if fit:
        p = numpy.polyfit(numpy.log(h), numpy.log(err), 1)
        C = numpy.exp(p[1])
        alpha = p[0]
        alg = [C*n**alpha for n in h]
        loglog(h, alg, '--', label="%.3f*%s^(%.3f)" %(C, hstr, alpha))
    if rate and h[0] != 0:
        alg = [err[0]/(h[0]**rate)*n**rate for n in h]
        loglog(h, alg, '--', label="%s^(%.3f)" %(hstr, rate))
    xlabel(xlab)
    ylabel(ylab)
    legend(loc='best')

# --- create interpolation for different h and orders ---
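The log-log rate fit used in hplot can be checked standalone; for err = C*h^alpha the fitted slope recovers alpha (illustrative second-order data):

import numpy as np

h = np.array([0.1, 0.05, 0.025, 0.0125])
err = 2.0 * h**2                           # exact second-order convergence
p = np.polyfit(np.log(h), np.log(err), 1)
print(p[0], np.exp(p[1]))                  # alpha ~2.0 and C ~2.0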
Project: tango | Author: LLNL
def default1():
    numRadialPts = 144
    rhoMin = 0.1
    rhoMax = 0.9
    rho = np.linspace(rhoMin, rhoMax, numRadialPts)
    qbar = 0.854 + 2.184 * rho**2

    minorRadius = 0.28   # a
    majorRadius = 0.71   # R0
    r = rho * minorRadius
    safetyFactor = qbar / np.sqrt(1 - (r/majorRadius)**2)

    # fit
    polynomialDegree = 3
    p = np.polyfit(rho, safetyFactor, polynomialDegree)
    qCoeffs = p[::-1]
    return qCoeffs
Project: pslab-desktop-apps | Author: fossasia
def PCSCH3(self,row):
        actuals=[];read=[]
        resistance = float(self.tbl.item(row,0).text())
        for a in np.linspace(.2e-3,1.5e-3,20):
            actuals.append( self.I.DAC.setCurrent(a) )
            time.sleep(0.001)
            read.append (self.I.get_average_voltage('CH3',samples=5))
        read = np.array(read)/resistance 
        actuals = np.array(actuals)
        sread = read*1e3
        sactuals = actuals*1e3
        self.DacCurves['PCS'].setData(sactuals,sread-sactuals)
        self.tbl.item(row,1).setText(' '.join(['%.3f' % a for a in sread - sactuals]))
        if np.any(abs(read-actuals)>10e-6):self.setSuccess(self.tbl.item(row,1),0)
        else: self.setSuccess(self.tbl.item(row,1),1)

        fitvals = np.polyfit(self.I.DAC.CHANS['PCS'].VToCode(read),self.I.DAC.CHANS['PCS'].VToCode(actuals),1)
        print (fitvals)
        if list(fitvals):
            self.PCS_SLOPE = fitvals[0] #slope
            self.PCS_OFFSET = fitvals[1]    #offset
Project: pslab-desktop-apps | Author: fossasia
def PCSCH3(self,row):
        actuals=[];read=[]
        resistance = float(self.tbl.item(row,0).text())
        for a in np.linspace(.2e-3,1.5e-3,20):
            actuals.append( self.I.DAC.setCurrent(a) )
            time.sleep(0.001)
            read.append (self.I.get_voltage('CH3',samples=5))
        read = np.array(read)/resistance 
        actuals = np.array(actuals)
        sread = read*1e3
        sactuals = actuals*1e3
        self.DacCurves['PCS'].setData(sactuals,sread-sactuals)
        self.tbl.item(row,1).setText(' '.join(['%.3f' % a for a in sread - sactuals]))
        if np.any(abs(read-actuals)>10e-6):self.setSuccess(self.tbl.item(row,1),0)
        else: self.setSuccess(self.tbl.item(row,1),1)

        fitvals = np.polyfit(self.I.DAC.CHANS['PCS'].VToCode(read),self.I.DAC.CHANS['PCS'].VToCode(actuals),1)
        print (fitvals)
        if list(fitvals):
            self.PCS_SLOPE = fitvals[0] #slope
            self.PCS_OFFSET = fitvals[1]    #offset
Project: supvisors | Author: julien6387
def get_linear_regression(xdata, ydata):
    """ Calculate the coefficients of the linear equation corresponding
    to the linear regression of a series of points. """
    try:
        import numpy
        return tuple(numpy.polyfit(xdata, ydata, 1))
    except ImportError:
        # numpy not available
        # try something approximate and simple
        datasize = len(xdata)
        sum_x = float(sum(xdata))
        sum_y = float(sum(ydata))
        sum_xx = float(sum(map(lambda x: x * x, xdata)))
        sum_products = float(sum([xdata[i] * ydata[i]
                                  for i in range(datasize)]))
        a = (sum_products - sum_x * sum_y / datasize) / (
            sum_xx - (sum_x * sum_x) / datasize)
        b = (sum_y - a * sum_x) / datasize
        return a, b
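A quick check (illustrative data) that the pure-Python fallback agrees with numpy.polyfit:

import numpy

xdata = [1.0, 2.0, 3.0, 4.0]
ydata = [2.1, 3.9, 6.2, 8.1]
a, b = numpy.polyfit(xdata, ydata, 1)
# closed-form least squares, same formulas as the except-branch above
n = len(xdata)
sum_x, sum_y = sum(xdata), sum(ydata)
sum_xx = sum(x * x for x in xdata)
sum_products = sum(x * y for x, y in zip(xdata, ydata))
a2 = (sum_products - sum_x * sum_y / n) / (sum_xx - sum_x * sum_x / n)
b2 = (sum_y - a2 * sum_x) / n
print(a, b)    # should match a2, b2 to rounding
print(a2, b2)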
Project: PySAT | Author: USGS-Astrogeology
def polyfit_baseline(bands, intensities, poly_order=5, num_stdv=3.,
                     max_iter=200):
    '''Iteratively fits a polynomial, discarding far-away points as peaks.
    Similar in spirit to ALS and related methods.
    Ref: Lieber & Mahadevan-Jansen (2003), "Automated method for subtraction
    of fluorescence from biological Raman spectra".
    '''
    fit_pts = intensities.copy()
    # precalculate [x^p, x^p-1, ..., x^1, x^0]
    poly_terms = bands[:, None] ** np.arange(poly_order, -1, -1)
    for _ in range(max_iter):
        coefs = np.polyfit(bands, fit_pts.T, poly_order)
        baseline = poly_terms.dot(coefs).T
        diff = fit_pts - baseline
        thresh = diff.std(axis=-1) * num_stdv
        mask = diff > np.array(thresh, copy=False)[..., None]
        unfitted = np.count_nonzero(mask)
        if unfitted == 0:
            break
        fit_pts[mask] = baseline[mask]  # these points are peaks, discard
    else:
        print("Warning: polyfit_baseline didn't converge in %d iters" % max_iter)
    return baseline
Project: PySAT | Author: USGS-Astrogeology
def band_center(spectrum, low_endmember=None, high_endmember=None, degree=3):
    x = spectrum.index
    y = spectrum

    if low_endmember is None:
        low_endmember = x[0]
    if high_endmember is None:
        high_endmember = x[-1]

    ny = y[low_endmember:high_endmember]

    fit = np.polyfit(ny.index, ny, degree)

    center_fit = Series(np.polyval(fit, ny.index), ny.index)
    center = band_minima(center_fit)

    return center, center_fit
Project: krpcScripts | Author: jwvanderbeck
def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)
Project: GY-91_and_PiCamera_RaspberryPi | Author: mikechan0731
def trendLine(self, axis_choose=None):
        stable_sec= int(self.record_sec_le.text())
        stable_count = int(stable_sec * (1/0.007))

        if axis_choose:
            axis = axis_choose
        else:
            axis = str(self.axis_combobox.currentText())

        x = self.raw_data['time'][:stable_count]
        y = self.raw_data[axis][:stable_count]
        coefficients = np.polyfit(x,y,1)
        p = np.poly1d(coefficients)
        coefficient_of_determination = r2_score(y, p(x))

        self.trendLine_content1_label.setText("Trendline: " + str(p))
        self.trendLine_content2_label.setText("R^2: " + str(coefficient_of_determination))
        return coefficients
Project: CAAPR | Author: Stargrazer82301
def FitPolynomial(fig, x, y, n):

#solution, res = np.polynomial.polynomial.polyfit(x,y,order,full=True)

    solution, C_p = np.polyfit(x, y, n, cov=True)  # C_p is the estimated covariance matrix

    # Do the interpolation for plotting:
    xrange = fig.get_xlim()
    t = np.linspace(xrange[0],xrange[1],100)
    # Matrix with rows 1, t, t**2, ...:
    TT = np.vstack([t**(n-i) for i in range(n+1)]).T
    yi = np.dot(TT, solution)  # matrix multiplication calculates the polynomial values
    C_yi = np.dot(TT, np.dot(C_p, TT.T)) # C_yi = TT*C_p*TT.T
    sig_yi = np.sqrt(np.diag(C_yi))  # Standard deviations are sqrt of diagonal


    fig.plot(t,yi, 'k-')
    # fig.plot(t,yi+sig_yi, 'k--')
    # fig.plot(t,yi-sig_yi, 'k--')

    return solution

# Take the log of the y value
Project: PC_plus | Author: MTS-Strathclyde
def compute_xvvr(self):
        """ Return xvv(r) matrix """
        r = np.array([i*self.dr for i in range(self.ngrid)])
        k = self.get_k()
        xvvr = [["" for i in range(self.nsites)] for j in range(self.nsites)]
        for i in range(self.nsites):
            for j in range(self.nsites):
                xvvk_ij = self.xvv_data[:,i,j]
                xvvr_ij = pubfft.sinfti(xvvk_ij*k, self.dr, -1)/r
#                n_pots_for_interp = 6
#                r_for_interp = r[1:n_pots_for_interp+1]
#                xvvr_for_interp = xvvr_ij[:n_pots_for_interp]
#                poly_coefs = np.polyfit(r_for_interp, xvvr_for_interp, 3)
#                poly_f = np.poly1d(poly_coefs)
#                xvvr[i][j] = [poly_f(0)]
                xvvr[i][j] = xvvr_ij
        return r, np.swapaxes(xvvr, 0, 2)
Project: PC_plus | Author: MTS-Strathclyde
def compute_zr(self):
        """ Return z(r) matrix """
        r = np.array([i*self.dr for i in range(self.ngrid)])
        k, zk = self.compute_zk()
        print('computed zk', zk.shape)
        zr = [["" for i in range(self.nsites)] for j in range(self.nsites)]
        for i in range(self.nsites):
            for j in range(self.nsites):
                zk_ij = zk[1:,i,j]
                zr_ij = pubfft.sinfti(zk_ij*k[1:], self.dr, -1)/r[1:]
                #zr_ij = np.abs(fftpack.fft(zk_ij))
                n_pots_for_interp = 6
                r_for_interp = r[1:n_pots_for_interp+1]
                zr_for_interp = zr_ij[:n_pots_for_interp]
                poly_coefs = np.polyfit(r_for_interp, zr_for_interp, 3)
                poly_f = np.poly1d(poly_coefs)
                zr[i][j] = [poly_f(0)]
                zr[i][j].extend(zr_ij)
        return r, np.swapaxes(zr, 0, 2)
Project: imgProcessor | Author: radjkarl
def smooth(x, y, weights):
    '''
    In case the NLF cannot be described by
    a square-root function, fall back to
    bounded polynomial interpolation.
    '''
    # A spline is hard to smooth properly, therefore this is solved with
    # bounded polynomial interpolation
    # ext=3: no extrapolation, but boundary value
#     return UnivariateSpline(x, y, w=weights,
#                             s=len(y)*weights.max()*100, ext=3)

#     return np.poly1d(np.polyfit(x,y,w=weights,deg=2))
    p = np.polyfit(x, y, w=weights, deg=2)
    if np.any(np.isnan(p)):
        # couldn't even do polynomial fit
        # as last option: assume constant noise
        my = np.average(y, weights=weights)
        return lambda x: my
    return lambda xint: np.poly1d(p)(np.clip(xint, x[0], x[-1]))
Project: cv4ag | Author: worldbank
def __init__(self,xpixel,ypixel,library,zoomLevel):
        self.xpixel=xpixel
        self.ypixel=ypixel
        #load csv data. Rows are zoom levels, column are latitude
        #and values are meters per pixel
        with open(library, 'r', newline='') as csvfile:
            metersperpixel = list(csv.reader(csvfile,delimiter=","))
        latitudes=[]
        #convert to floats
        for element in metersperpixel[0][1:]:
            latitudes.append(float(element))
        for row in range(1,len(metersperpixel)):
            res_values=[]
            if int(metersperpixel[row][0])==zoomLevel:
                #convert to floats
                for element in metersperpixel[row][1:]:
                    res_values.append(float(element))
                self.fitvalues=\
                    numpy.polyfit(latitudes, #fit to latitude values
                    res_values, 3)  #3rd degree polynomial fit
                print("Fit done.")
                break
Project: gpvdm | Author: roderickmackenzie
def callback_click_ok(self,data):
        points = np.array(self.interal_data)    #[(1, 1), (2, 2), (3, 3), (7, 3), (9, 3)]
        # get x and y vectors
        x = points[:,0]
        y = points[:,1]

        # calculate polynomial
        terms=int(self.sp.value())
        self.ret = np.polyfit(x, y, terms)
        f = np.poly1d(self.ret)

        tot=""
        val=0
        for i in range(0,len(self.ret)):
            p=len(self.ret)-1-i
            tot=tot+str(self.ret[i])+"*pow(w,"+str(p)+")"+"+"
        tot=tot[:-1]
        self.ret_math=tot

        self.close()
Project: Thrifty | Author: swkrueger
def build_model_poly(detection_pairs, beacon_sdoa, nominal_sample_rate, deg=2):
    if len(detection_pairs) < deg + 1:
        # not enough beacon transmissions
        return None

    soa0 = np.array([d[0].soa for d in detection_pairs])
    soa1 = np.array([d[1].soa for d in detection_pairs])
    soa1at0 = soa1 + np.array(beacon_sdoa)
    coef = np.polyfit(soa1at0, soa0, deg)
    fit = np.poly1d(coef)
    # residuals = soa0 - fit(soa1at0)
    # print(np.mean(residuals))

    def evaluate(det0, det1):
        return (det0.soa - fit(det1.soa)) / nominal_sample_rate

    return evaluate
Project: mimclib | Author: StochasticNumerics
def ExpLine(*args, **kwargs):
        rate = kwargs.pop('rate', None)
        log_data = kwargs.pop('log_data', True)
        data = kwargs.pop('data', None)
        const = kwargs.pop('const', 1)
        if rate is None:
            assert data is not None and len(data) > 0, "rate or data must be given"
            x = np.array([d[0] for d in data])
            y = np.array([d[1] for d in data])
            if len(x) > 0 and len(y) > 0:
                if log_data:
                    rate = np.polyfit(np.log(x), np.log(y), 1)[0]
                else:
                    rate = np.polyfit(x, y, 1)[0]
        if "label" in kwargs:
            kwargs["label"] = kwargs.pop("label").format(rate=rate)
        return FunctionLine2D(*args, fn=lambda x, r=rate:
                              const*np.array(x)**r, data=data,
                              log_data=log_data, **kwargs)
Project: FoundryDataBrowser | Author: ScopeFoundry
def redo_fit(self):
        lx0, lx1 = self.power_plot_lr.getRegion()

        x0, x1 = 10**lx0, 10**lx1


        X = self.dat['power_meter_power']

        n = len(X)

        ii0 = np.argmin(np.abs(X[:n//2+1]-x0))
        ii1 = np.argmin(np.abs(X[:n//2+1]-x1))

        print(ii0,ii1)

        m, b = np.polyfit(np.log10(X[ii0:ii1]), np.log10(self.power_plot_y[ii0:ii1]), deg=1)
        print("fit", m,b) 

        fit_data = 10**(np.poly1d((m,b))(np.log10(X)))
        print("fit_data", fit_data)    
        self.power_fit_plotcurve.setData(X, fit_data)

        self.fit_text.setHtml("<h1>I<sup>{:1.2f}</sup></h1>".format(m))
        self.fit_text.setPos(0.5*(lx0+lx1), np.log10(fit_data[(ii0+ii1)//2]))
Project: aws-lambda-numpy | Author: vitolimandibhrata
def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)
Project: BlueLines | Author: JacksYou
def scatter_crimes_population():
    """creates a scatter plot using the values of Population and Crimes Per 100000.
    iterates through the database and reads all the data in the given headers and creates plots for each data point
    """
    x = df["Number of Crimes"].values
    y = df["Population"].values
    assert len(x) == len(y)
    df["Crimes Per 100000"] = np.array([(x[i] / y[i]) * 100000.0 for i in range(len(x))], dtype="float32")

    ax = df.plot.scatter(y="Population", x="Crimes Per 100000")

    for index, row in df.iterrows():
        ax.annotate(row["Community Name"], (row["Crimes Per 100000"], row["Population"]),
                    size=7,
                    color='darkslategrey')

    x = df["Crimes Per 100000"].values
    y = df["Population"].values

    m, b = np.polyfit(x, y, 1)
    line = plt.plot(x, m * x + b, 'b--')
    plt.setp(line, color='orange', alpha=0.5, linewidth=2.0)
    plt.show()
Project: SamuROI | Author: samuroi
def linbleeched_F0(data):
    """
    Calculate a linear fit (:math:`y(t)=m t+y_0)` for each pixel, which is assumed to correct for bleeching effects.

    :param data: he video data of shape (M,N,T).
    :return: tuple (m,y0) with two images each with shape (M,N).
    """

    # generate c coordinates
    x = numpy.arange(data.shape[-1])
    # reshape the data to two d array, first dimension is pixel index, second dimension is time
    d = numpy.reshape(data, (data.shape[0] * data.shape[1], data.shape[-1]))
    # find fit parameters
    m, y0 = numpy.polyfit(x, d.T, 1)
    # reshape fit parameters back to image shape
    return m.reshape(data.shape[0:2]), y0.reshape(data.shape[0:2])
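A shape check for linbleeched_F0 with random video data (illustrative sizes):

import numpy

data = numpy.random.rand(4, 5, 100)  # (M, N, T) synthetic video
m, y0 = linbleeched_F0(data)
print(m.shape, y0.shape)             # (4, 5) (4, 5)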
Project: PyGEOMET | Author: pygeomet
def RotatePoints(self, x_in, y_in):
        #Rotational matrix
        angle = np.pi/1.065
        rot = np.array([[np.cos(angle),-np.sin(angle)],[np.sin(angle),np.cos(angle)]])

        #Rotation Stuff
        x = 0.
        y = np.log(1000.)*-self.rd
        d1 = np.array([[x],[y]])
        V1 = np.dot(rot,d1)
        B1 = np.linspace(d1[0],V1[0],100)
        B2 = np.linspace(d1[1],V1[1],100)
        p = np.polyfit(B1,B2,1)

        #Skew-T y-axis
        y_out = -self.rd*np.log(y_in) 

        #Skew-T x-axis
        B = (x_in + (-y/p[0]))*p[0]
        x_out = (y_out+B)/p[0]

        return x_out, y_out

    #Process winds for the hodograph
Project: financial-engineering | Author: StratiFi
def build_batch_lstsq_estimates(self, asset_returns, benchmark_returns):
        if not len(asset_returns) == len(benchmark_returns):
            raise ValueError('asset and benchmark return series must have the same length')

        # Rolling least-squares estimates of alpha/beta on returns data
        beta = np.zeros(len(asset_returns))
        alpha = np.zeros(len(asset_returns))
        for enum_i, elem in enumerate(asset_returns):
            lookback = min(self.lookback, enum_i)
            # print '==> ', enum_i, len(asset_returns), len(beta)
            beta[enum_i], alpha[enum_i] = np.polyfit(benchmark_returns[enum_i - lookback:enum_i + 1],
                                                     asset_returns[enum_i - lookback:enum_i + 1], 1)

        # don't wanna do a line fit for less than 3 points, really
        beta[0], alpha[0] = 0, 0
        beta[1], alpha[1] = 0, 0

        self.alpha_betas_lstsq = np.array(list(zip(alpha, beta)))
Project: DMTL-DDI | Author: illidanlab
def early_stop_by_valid(valid_auc, k):
    '''Early stop.
    '''

    if k > len(valid_auc):
        return False
    print(k, valid_auc)

    x = range(k)
    y = valid_auc[-k:]

    # print x, y
    slope, bias = np.polyfit(x, y, 1)
    print("slope: {0:f}".format(slope))

    stopflag = False
    if slope <= FLAGS.slope:
        stopflag = True
    return stopflag
Project: lambda-numba | Author: rlhotovy
def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)
Project: blog_analysis | Author: Databrawl
def plot_scatter_charts(data, file_name):
    scatters = []
    for lang, values in data.items():
        s = figure(width=300, plot_height=300, title=lang)
        s.yaxis.formatter = NumeralTickFormatter(format="0.0a")
        s.circle(values[0], values[1], size=10, color="navy", alpha=0.5)
        x = np.linspace(1, 100, 10)
        # noinspection PyTupleAssignmentBalance
        m, b = np.polyfit(values[0], values[1], 1)
        y = m * x + b
        corr_coef = round(pearsonr(values[0], values[1])[0], 1)
        s.line(x, y, legend=f'PCC = {corr_coef}')
        scatters.append(s)
    split_scatters = split(scatters, 3)
    p = gridplot(split_scatters)
    output_file(file_name)
    show(p)
Project: SDcarsLaneDetection | Author: Nazanin1369
def defineRegionOfInterest(img, left_bottom = [0, 539], right_bottom = [900, 539], apex = [475, 320]):
    xsize = img.shape[1]
    ysize = img.shape[0]

    # Perform a linear fit (y=Ax+B) to each of the three sides of the triangle
    # np.polyfit returns the coefficients [A, B] of the fit
    fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
    fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
    fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)

    # Find the region inside the lines
    XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))

    region_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \
                        (YY > (XX*fit_right[0] + fit_right[1])) & \
                        (YY < (XX*fit_bottom[0] + fit_bottom[1]))

    return region_thresholds
Project: tensorflow-wavenet | Author: ibab
def testEncodeDecodeShift(self):
        x = np.linspace(-1, 1, 1000).astype(np.float32)
        with self.test_session() as sess:
            encoded = mu_law_encode(x, QUANT_LEVELS)
            decoded = mu_law_decode(encoded, QUANT_LEVELS)
            roundtripped = sess.run(decoded)

        # Detect non-unity scaling and non-zero shift in the roundtripped
        # signal by asserting that slope = 1 and y-intercept = 0 of line fit to
        # roundtripped vs x values.
        coeffs = np.polyfit(x, roundtripped, 1)
        slope = coeffs[0]
        y_intercept = coeffs[1]
        EPSILON = 1e-4
        self.assertNear(slope, 1.0, EPSILON)
        self.assertNear(y_intercept, 0.0, EPSILON)
Project: deliver | Author: orchestor
def test_polyfit_build(self):
        # Ticket #628
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)
Project: WaveletQuotes | Author: JobyKK
def hurst(ts):
    # Create the range of lag values
    window_len = 30
    hurst_ts = np.zeros((0,), dtype=float)
    for tail in range(window_len, len(ts)):
        lags = range(1, window_len // 2)
        # print(lags)
        # Calculate the array of the variances of the lagged differences
        cur_ts = ts[max(1, tail - window_len):tail]
        # cur_ts = ts[:tail]
        tau = [sqrt(std(subtract(cur_ts[lag:], cur_ts[:-lag]))) for lag in lags]

        # print("Time series slice len: ",len(cur_ts),len(tau),lags)
        # Use a linear fit to estimate the Hurst Exponent
        poly = polyfit(log(lags), log(tau), 1)

        # Return the Hurst exponent from the polyfit output
        # print(poly[0]*2.0)
        hurst_ts = np.append(hurst_ts, poly[0] * 2.0)
    return hurst_ts
Project: adel | Author: openalea-incubator
def fit_dimensions(self, data, fit_TTmax = True):
        """ 
        if fit_max is True, use blade length profile to adjust dHS_max
        """

        if (fit_TTmax and 'L_blade' in data.columns):
            dat = data.dropna(subset=['L_blade'])
            xn = self.xn(dat['rank'])
            fit = numpy.polyfit(xn,dat['L_blade'],7)
            x = numpy.linspace(min(xn), max(xn), 500)
            y = numpy.polyval(fit,x)
            self.xnmax = x[numpy.argmax(y)]
            self.TTmax = self.TTxn(self.xnmax)


        self.scale = {k: numpy.mean(data[k] / self.predict(k,data['rank'], data['nff'])) for k in data.columns if k in self.ref.columns}

        return self.scale
Project: petrophysics | Author: petroGG
def horner(circulation_time, times, temp):
    '''
    horner_bht_temp (circulation_time, times, temp)
    *Input parameters:
    - circulation_time - hours from last circulation;
    - times - total time since circulation stopped at 1st Run, 2nd Run and so on ...
    - temp - a list of temperatures corresponding to 1st Run, 2nd Run and so on ...
    *Returns:
    - horner_temp - formation temperature estimated by Horner method (thermometer readings
    from different runs)
    *Example of usage:
    horner(6, (7.0,11.5,19.5), (100,105,108))
        where:
        circulation_time = 6           # time since circulation stopped (hours)
        times = (7.0,11.5,19.5)        # total time since circulation stopped at 1st, 2nd, 3rd RUN (hours)
        temp=(100,105,108)             # temperatures recorded at 1st, 2nd, 3rd RUN (Celsius degrees)
    '''
    horner_time = np.array(times) / (circulation_time + np.array(times))
    slope, intercept = np.polyfit(np.log(horner_time), temp, 1)
    horner_temp = round(slope*np.log(1) + intercept, 2)
    return horner_temp
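Running the docstring example (assumes numpy is imported as np) extrapolates the Horner plot to log(1) = 0, i.e. the intercept of the fit; it should print roughly 114.3:

print(horner(6, (7.0, 11.5, 19.5), (100, 105, 108)))  # ~114.3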
Project: pwtools | Author: elcorto
def vander(points, deg):
    """N-dim Vandermonde matrix for data `points` and a polynomial of degree
    `deg`.

    Parameters
    ----------
    points : see polyfit()
    deg : int
        Degree of the poly (e.g. 3 for cubic).

    Returns
    -------
    vander : 2d array (npoints, (deg+1)**ndim)
    """
    powers = poly_powers(points.shape[1], deg)
    # low memory version, slower
    ##npoints = points.shape[0]
    ##vand = np.empty((npoints, (deg+1)**ndim), dtype=float)
    ##for ipoint in range(npoints):
    ##    vand[ipoint,:] = (points[ipoint]**powers).prod(axis=1)
    tmp = (points[...,None] ** np.swapaxes(powers, 0, 1)[None,...])
    return tmp.prod(axis=1)
Project: Machine-Learning-for-Stock-Trading | Author: wilsonsk
def build_scatter_plot(dframe):
    #build scatter plot: stock1 vs. stock2
    daily_ret = get_daily_returns(dframe)
    symbols = []
    for symbol in dframe[0:]:
        symbols.append(symbol)
    daily_ret.plot(kind='scatter', x=symbols[0], y=symbols[1]) 
    #use polyfit() which needs x-coords and y-coords to fit a line -- y-coords are daily return values
    #polyfit() returns first the polynomial coefficient (beta) and second the y-intercept (alpha) -- in the form y = mx + b -- m is the coefficient and b is the intercept
    #the idea for plotting: for every value of x we find a value of y using the line equation y = mx + b
    beta_stock1, alpha_stock1 = np.polyfit(daily_ret[symbols[1]], daily_ret[symbols[0]], 1)
    beta_stock2, alpha_stock2 = np.polyfit(daily_ret[symbols[0]], daily_ret[symbols[1]], 1)

    print "beta of ", symbols[0], "= ", beta_stock1
    print "alpha of ", symbols[0], "= ", alpha_stock1   
    print "beta of ", symbols[1], "= ", beta_stock2
    print "alpha of ", symbols[1], "= ", alpha_stock2

    plt.plot(daily_ret[symbols[0]], beta_stock2 * daily_ret[symbols[0]] + alpha_stock2, '-', color='r')
    plt.show()
Project: ArduPi-ECG | Author: ferdavid1
def fourierExtrapolation(x, n_predict):
    n = len(x)
    n_harm = 10                     # number of harmonics in model
    t = np.arange(0, n)
    p = np.polyfit(t, x, 1)         # find linear trend in x
    x_notrend = x - p[0] * t        # detrended x
    x_freqdom = fft.fft(x_notrend)  # detrended x in frequency domain
    f = fft.fftfreq(n)              # frequencies
    indexes = list(range(n))
    # sort indexes by frequency, lower -> higher
    indexes.sort(key = lambda i: np.absolute(f[i]))

    t = np.arange(0, n + n_predict)
    restored_sig = np.zeros(t.size)
    for i in indexes[:1 + n_harm * 2]:
        ampli = np.absolute(x_freqdom[i]) / n   # amplitude
        phase = np.angle(x_freqdom[i])          # phase
        restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)
    return restored_sig + p[0] * t
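A usage sketch (synthetic signal; assumes `from numpy import fft` as in the snippet above):

import numpy as np
from numpy import fft

t = np.arange(200)
signal = 0.05 * t + np.sin(2 * np.pi * t / 25.0)  # linear trend plus one harmonic
extrapolated = fourierExtrapolation(signal, 50)
print(extrapolated.shape)                         # (250,) = 200 observed + 50 predicted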
Project: wmd-relax | Author: src-d
def __call__(self, words, weights, vocabulary_max):
        if len(words) < vocabulary_max * self.trigger_ratio:
            return words, weights

        if not isinstance(words, numpy.ndarray):
            words = numpy.array(words)

        # Tail optimization does not help with very large vocabularies
        if len(words) > vocabulary_max * 2:
            indices = numpy.argpartition(weights, len(weights) - vocabulary_max)
            indices = indices[-vocabulary_max:]
            words = words[indices]
            weights = weights[indices]
            return words, weights

        # Vocabulary typically consists of these three parts:
        # 1) the core - we found its border - `core_end` - 15%
        # 2) the body - 70%
        # 3) the minor tail - 15%
        # (1) and (3) are roughly the same size
        # (3) can be safely discarded, (2) can be discarded with care,
        # (1) shall never be discarded.

        sorter = numpy.argsort(weights)[::-1]
        weights = weights[sorter]
        trend_start = int(len(weights) * 0.2)
        trend_finish = int(len(weights) * 0.8)
        z = numpy.polyfit(numpy.arange(trend_start, trend_finish),
                          numpy.log(weights[trend_start:trend_finish]),
                          1)
        exp_z = numpy.exp(z[1] + z[0] * numpy.arange(len(weights)))
        avg_error = numpy.abs(weights[trend_start:trend_finish] -
                              exp_z[trend_start:trend_finish]).mean()
        tail_size = numpy.argmax((numpy.abs(weights - exp_z) < avg_error)[::-1])
        if tail_size > 0:
            weights = weights[:-tail_size][:vocabulary_max]
            words = words[sorter[:-tail_size]][:vocabulary_max]
        else:  # argmax over an all-False mask is 0, which would empty the arrays
            weights = weights[:vocabulary_max]
            words = words[sorter][:vocabulary_max]

        return words, weights
Project: rosie | Author: datasciencebr
def fit(self, X):
        _X = self.__aggregate_dataset(X)
        self.polynomial = np.polyfit(_X['expenses'].astype(np.int64),
                                     _X['distance_traveled'].astype(np.int64),
                                     3)
        self._polynomial_fn = np.poly1d(self.polynomial)
        return self
Project: PyGPS | Author: gregstarr
def minScalErr(stec,el,z,thisBias):

    intel=np.asarray(el[stec.index],int)
    sTEC=np.asarray(stec,float)
    zmap = z[stec.index]
    c=np.array([(i,np.average((sTEC[intel==i]-thisBias)
                              /zmap[intel==i])) for i in np.unique(intel) if i>30])

    return np.polyfit(c[:,0],c[:,1],1)[0]
Project: histwords | Author: williamleif
def trendline(xd, yd, order=1, c='r', alpha=1, plot_r=False, text_pos=None):
    """Make a line of best fit"""

    #Calculate trendline
    coeffs = np.polyfit(xd, yd, order)

    intercept = coeffs[-1]
    slope = coeffs[-2]
    if order == 2: power = coeffs[0]
    else: power = 0

    minxd = np.min(xd)
    maxxd = np.max(xd)

    xl = np.array([minxd, maxxd])
    yl = power * xl ** 2 + slope * xl + intercept

    #Plot trendline
    plt.plot(xl, yl, color=c, alpha=alpha)

    #Calculate the Pearson correlation coefficient
    r = sp.stats.pearsonr(xd, yd)[0]

    if not plot_r:
        #Plot the R value
        if text_pos is None:
            text_pos = (0.9 * maxxd + 0.1 * minxd, 0.9 * np.max(yd) + 0.1 * np.min(yd),)
        plt.text(text_pos[0], text_pos[1], '$R = %0.2f$' % r)
    else:
        #Return the R value:
        return r
Project: tensorflow_ocr | Author: BowieHsu
def fit_line(p1, p2):
    # fit a line ax+by+c = 0
    if p1[0] == p1[1]:
        return [1., 0., -p1[0]]
    else:
        [k, b] = np.polyfit(p1, p2, deg=1)
        return [k, -1., b]
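A quick check of fit_line with hypothetical points (assumes numpy is imported as np): the line through (0, 1) and (4, 3) is 0.5x - y + 1 = 0.

print(fit_line([0.0, 4.0], [1.0, 3.0]))  # [0.5, -1.0, 1.0]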