Python numpy module, diff() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.diff().
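
Before the project examples, a minimal refresher on numpy.diff itself (standard numpy behavior):

import numpy as np

a = np.array([1, 2, 4, 7, 0])
print(np.diff(a))          # [ 1  2  3 -7] -- one element shorter than the input
print(np.diff(a, n=2))     # [  1   1 -10] -- the difference applied twice
m = np.array([[1, 3, 6], [0, 5, 6]])
print(np.diff(m, axis=0))  # [[-1  2  0]] -- differences along the rows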

Project: pyrsss    Author: butala    | Project source | File source
def fill_nans(df, delta=None):
    """
    """
    if not delta:
        dt_diff = NP.diff(df.index.values)
        delta_timedelta64 = min(dt_diff)
        delta_seconds = delta_timedelta64 / NP.timedelta64(1, 's')
        delta = timedelta(seconds=delta_seconds)
    logger.info('Using delta = {} (s)'.format(delta.total_seconds()))
    index_new = PD.date_range(start=df.index[0],
                              end=df.index[-1],
                              freq=delta)
    missing = sorted(set(index_new) - set(df.index))
    if missing:
        logger.warning('Missing time indices (filled by NaNs):')
        for x in missing:
            logger.warning(x)
    return df.reindex(index_new, copy=False), delta
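
A hypothetical usage sketch (the imports and the one-minute series below are illustrative, not from the project):

import logging
from datetime import timedelta
import numpy as NP
import pandas as PD

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

index = PD.to_datetime(['2020-01-01 00:00', '2020-01-01 00:01', '2020-01-01 00:03'])
df = PD.DataFrame({'value': [1.0, 2.0, 4.0]}, index=index)
filled, delta = fill_nans(df)  # delta inferred as 1 minute; 00:02 is filled with NaN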
Project: cellranger    Author: 10XGenomics    | Project source | File source
def numpy_groupby(values, keys):
    """ Group a collection of numpy arrays by key arrays.
        Yields (key_tuple, view_tuple) where key_tuple is the key grouped on and view_tuple is a tuple of views into the value arrays.
          values: tuple of arrays to group
          keys: tuple of sorted, numeric arrays to group by """

    if len(values) == 0:
        return
    if len(values[0]) == 0:
        return

    for key_array in keys:
        assert len(key_array) == len(keys[0])
    for value_array in values:
        assert len(value_array) == len(keys[0])

    # The indices where any of the keys differ from the previous key become group boundaries
    key_change_indices = np.logical_or.reduce(tuple(np.concatenate(([1], np.diff(key))) != 0 for key in keys))
    group_starts = np.flatnonzero(key_change_indices)
    group_ends = np.roll(group_starts, -1)
    group_ends[-1] = len(keys[0])

    # itertools.izip is Python 2 only; on Python 3 use the built-in zip here
    for group_start, group_end in itertools.izip(group_starts, group_ends):
        yield tuple(key[group_start] for key in keys), tuple(value[group_start:group_end] for value in values)
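
A small usage sketch under Python 2 (the function relies on itertools.izip; on Python 3, zip is the drop-in replacement):

import itertools
import numpy as np

keys = (np.array([0, 0, 1, 1, 2]),)
values = (np.array([10, 20, 30, 40, 50]),)
for key_tuple, views in numpy_groupby(values, keys):
    print(key_tuple, views[0])
# (0,) [10 20]
# (1,) [30 40]
# (2,) [50]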
Project: aapm_thoracic_challenge    Author: xf4j    | Project source | File source
def get_labels(contours, shape, slices):
    z = [np.around(s.ImagePositionPatient[2], 1) for s in slices]
    pos_r = slices[0].ImagePositionPatient[1]
    spacing_r = slices[0].PixelSpacing[1]
    pos_c = slices[0].ImagePositionPatient[0]
    spacing_c = slices[0].PixelSpacing[0]

    label_map = np.zeros(shape, dtype=np.float32)
    for con in contours:
        num = ROI_ORDER.index(con['name']) + 1
        for c in con['contours']:
            nodes = np.array(c).reshape((-1, 3))
            assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
            z_index = z.index(np.around(nodes[0, 2], 1))
            r = (nodes[:, 1] - pos_r) / spacing_r
            c = (nodes[:, 0] - pos_c) / spacing_c
            rr, cc = polygon(r, c)
            label_map[z_index, rr, cc] = num

    return label_map
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __detect_spike_peak(self,ang_data,Thr,peak_before,peak_after):
        if Thr < 0:
            dd_0 = np.where(ang_data<Thr)[0]
        elif Thr >=0:
            dd_0 = np.where(ang_data>=Thr)[0]
        dd_1 = np.diff(dd_0,n=1)
        dd_2 = np.where(dd_1 > 1)[0]+1
        dd_3 = np.split(dd_0,dd_2)
        spike_peak = []
        if Thr < 0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmin()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        elif Thr >=0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmax()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        return np.array(spike_peak)
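
The np.diff/np.split combination above is a compact idiom for grouping threshold-crossing indices into runs of consecutive samples (one run per candidate spike); in isolation:

import numpy as np

idx = np.array([3, 4, 5, 9, 10, 17])    # samples beyond threshold
breaks = np.where(np.diff(idx) > 1)[0] + 1
print(np.split(idx, breaks))            # [array([3, 4, 5]), array([ 9, 10]), array([17])]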
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __test_ks(self,x):
        x = x[~np.isnan(x)]
        n = x.size
        x.sort()
        yCDF = np.arange(1,n+1)/float(n)
        notdup = np.hstack([np.diff(x,1),[1]])
        notdup = notdup>0
        x_expcdf = x[notdup]
        y_expcdf = np.hstack([[0],yCDF[notdup]])
        zScores = (x_expcdf-np.mean(x))/np.std(x,ddof=1)
        mu = 0
        sigma = 1
        theocdf = 0.5*erfc(-(zScores-mu)/(np.sqrt(2)*sigma))

        delta1 = y_expcdf[:-1]-theocdf
        delta2 = y_expcdf[1:]-theocdf
        deltacdf = np.abs(np.hstack([delta1,delta2]))
        KSmax = deltacdf.max()
        return KSmax
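
The notdup step uses np.diff on the sorted sample to keep only the last occurrence of each duplicated value before forming the empirical CDF; a standalone check:

import numpy as np

x = np.array([1.0, 2.0, 2.0, 3.0])
notdup = np.hstack([np.diff(x), [1]]) > 0   # [ True False  True  True]
print(x[notdup])                            # [1. 2. 3.]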
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __detect_spike_peak(self,ang_data,Thr,peak_before,peak_after):
        if Thr < 0:
            dd_0 = np.where(ang_data<Thr)[0]
        elif Thr >=0:
            dd_0 = np.where(ang_data>=Thr)[0]
        dd_1 = np.diff(dd_0,n=1)
        dd_2 = np.where(dd_1 > 1)[0]+1
        dd_3 = np.split(dd_0,dd_2)
        spike_peak = []
        if Thr < 0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmin()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        elif Thr >=0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmax()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        return np.array(spike_peak)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __test_ks(self,x):
        x = x[~np.isnan(x)]
        n = x.size
        x.sort()
        yCDF = np.arange(1,n+1)/float(n)
        notdup = np.hstack([np.diff(x,1),[1]])
        notdup = notdup>0
        x_expcdf = x[notdup]
        y_expcdf = np.hstack([[0],yCDF[notdup]])
        zScores = (x_expcdf-np.mean(x))/np.std(x,ddof=1)
        mu = 0
        sigma = 1
        theocdf = 0.5*erfc(-(zScores-mu)/(np.sqrt(2)*sigma))

        delta1 = y_expcdf[:-1]-theocdf
        delta2 = y_expcdf[1:]-theocdf
        deltacdf = np.abs(np.hstack([delta1,delta2]))
        KSmax = deltacdf.max()
        return KSmax
Project: inqbus.rainflow    Author: Inqbus    | Project source | File source
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # the first and last values are always local extrema
    signchange[0] = 1

    # last value is missing because the diff-array is 1 value shorter than the
    # input array so we have to add it again
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
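
A quick usage sketch:

import numpy as np

data = np.array([1, 2, 3, 2, 1, 2])
print(get_extrema(data))   # [1 3 1 2] -- first value, peak, valley, last value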
Project: pyrsss    Author: butala    | Project source | File source
def apply_emtf(df_E,
               df_B,
               emtf_key,
               index,
               extrapolate0=True):
    """
    Apply the EMTF associated with *emtf_key* to magnetometer data
    found in *df_B* and store result to *df_E*. Use USArray .xml
    repository information :class:`Index` to process the 3-D EMTFs.
    """
    logger.info('applying transfer function {}'.format(emtf_key))
    interval = NP.diff(df_B.index.values[:2])[0] / NP.timedelta64(1, 's')
    Bx = df_B.B_X.values
    By = df_B.B_Y.values
    if emtf_key.startswith('USArray'):
        xml_fname = index[emtf_key][1]
        Ex, Ey = tf_3D(Bx, By, interval, xml_fname, extrapolate0=extrapolate0)
    else:
        Ex, Ey = tf_1D(Bx, By, interval, emtf_key)
    df_E[emtf_key + '_X'] = Ex
    df_E[emtf_key + '_Y'] = Ey
    return df_E
Project: onsager_deep_learning    Author: mborgerding    | Project source | File source
def interp1d_(xin_,xp,yp_):
    """
    Interpolate a uniformly sampled piecewise linear function. Mapping elements
    from xin_ to the result.  Input values will be clipped to range of xp.
        xin_ :  input tensor (real)
        xp : x grid (constant -- must be a 1d numpy array, uniformly spaced)
        yp_ : tensor of the result values at the gridpoints xp
    """
    import tensorflow as tf
    x_ = tf.clip_by_value(xin_,xp.min(),xp.max())
    dx = xp[1]-xp[0]
    assert len(xp.shape)==1,'only 1d interpolation'
    assert xp.shape[0]==int(yp_.get_shape()[0])
    assert abs(np.diff(xp)/dx - 1.0).max() < 1e-6,'must be uniformly sampled'

    newshape = [  ]
    x1_ = tf.expand_dims(x_,-1)
    dt = yp_.dtype
    wt_ = tf.maximum(tf.constant(0.,dtype=dt), 1-abs(x1_ - tf.constant(xp,dtype=dt))/dx  )
    y_ = tf.reduce_sum(wt_ * yp_,axis=-1)
    return y_
Project: em_examples    Author: geoscixyz    | Project source | File source
def PseudoSectionWidget(survey, flag):
    if flag == "PoleDipole":
        ntx, nmax = xr.size-2, 8
        dxr = np.diff(xr)
    elif flag == "DipolePole":
        ntx, nmax = xr.size-1, 7
        dxr = xr
    elif flag == "DipoleDipole":
        ntx, nmax = xr.size-3, 8
        dxr = np.diff(xr)
    xzlocs = getPseudoLocs(dxr, ntx, nmax, flag)
    PseudoSectionPlot = lambda i,j,flag: PseudoSectionPlotfnc(i, j, survey, flag)
    return widgetify(PseudoSectionPlot,
                     i=IntSlider(min=0, max=ntx-1, step=1, value=0),
                     j=IntSlider(min=0, max=nmax-1, step=1, value=0),
                     flag=ToggleButtons(options=['DipoleDipole', 'PoleDipole', 'DipolePole'],
                                        description='Array Type'),)
Project: QTS_Research    Author: geome-mitbbs    | Project source | File source
def rsi(obj, start=-14, end=-1, price_feature='Close'):
    if isinstance(obj, str):
        obj = prices(obj, start, end, price_feature)
        start = 0
        end = -1

    if end < 0:
        end += len(obj)
    if start < 0:
        start += len(obj)

    _data = np.diff(obj[start: (end + 1)])
    len_gain = len(_data[_data > 0.0])
    len_loss = len(_data[_data < 0.0])
    if len_gain == 0 or len_loss == 0:
        return 50
    average_gain = np.mean(_data[_data > 0.0])
    average_loss = np.abs(np.mean(_data[_data < 0.0]))
    first_rs = average_gain / average_loss
    rsi = 100 - 100 / (1 + first_rs)

    return rsi
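
A usage sketch on a hand-made price list (values chosen so average gain equals average loss, giving RSI exactly 50):

closes = [44.0, 44.3, 44.1, 44.2, 43.6, 44.3, 44.8]
print(rsi(closes, start=0, end=-1))   # 50.0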
Project: pycubicspline    Author: AtsushiSakai    | Project source | File source
def __init__(self, x, y):
        self.b, self.c, self.d, self.w = [], [], [], []

        self.x = x
        self.y = y

        self.nx = len(x)  # dimension of x
        h = np.diff(x)

        # calc coefficient a
        self.a = [iy for iy in y]

        # calc coefficient c
        A = self.__calc_A(h)
        B = self.__calc_B(h)
        self.c = np.linalg.solve(A, B)
        #  print(self.c1)

        # calc spline coefficient b and d
        for i in range(self.nx - 1):
            self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
            tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
                (self.c[i + 1] + 2.0 * self.c[i]) / 3.0
            self.b.append(tb)
Project: muesr    Author: bonfus    | Project source | File source
def unique_reflections(self, hkl):
        """Returns a subset *hkl* containing only the symmetry-unique
        reflections.

        Example:

        >>> from ase.lattice.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.unique_reflections([[ 2,  0,  0], 
        ...                        [ 0, -2,  0], 
        ...                        [ 2,  2,  0], 
        ...                        [ 0, -2, -2]])
        array([[2, 0, 0],
               [2, 2, 0]])
        """
        hkl = np.array(hkl, dtype=int, ndmin=2)
        hklnorm = self.symmetry_normalised_reflections(hkl)
        perm = np.lexsort(hklnorm.T)
        iperm = perm.argsort()
        xmask = np.abs(np.diff(hklnorm[perm], axis=0)).any(axis=1)
        mask = np.concatenate(([True], xmask))
        imask = mask[iperm]
        return hkl[imask]
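
The lexsort/diff pairing above is a general idiom for masking duplicate rows while preserving the original order; a standalone sketch:

import numpy as np

rows = np.array([[2, 0, 0], [2, 2, 0], [2, 0, 0]])
perm = np.lexsort(rows.T)
keep = np.concatenate(([True], np.abs(np.diff(rows[perm], axis=0)).any(axis=1)))
print(rows[keep[perm.argsort()]])   # [[2 0 0]
                                    #  [2 2 0]]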
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def contour_to_monitor_coords(screenCnt):
    '''Apply pyimagesearch algorithm to identify tl,tr,br,bl points from a contour'''
    # now that we have our screen contour, we need to determine
    # the top-left, top-right, bottom-right, and bottom-left
    # points so that we can later warp the image -- we'll start
    # by reshaping our contour to be our finals and initializing
    # our output rectangle in top-left, top-right, bottom-right,
    # and bottom-left order
    pts = screenCnt.reshape(4, 2)
    rect = np.zeros((4, 2), dtype = "float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    return rect
Project: astk    Author: openalea-incubator    | Project source | File source
def __call__(self, time_sequence, weather_data):
        """ Compute thermal time accumulation over time_sequence

        :Parameters:
        ----------
        - `time_sequence` (panda dateTime index)
            A sequence of TimeStamps indicating the dates of all elementary time steps of the simulation
        - weather (alinea.astk.Weather instance)
            A Weather database

        """    
        try:
            Tair = weather_data.temperature_air[time_sequence]
        except Exception:
            #strange extract needed on visualea 1.0 (to test again with ipython in visualea)
            T_data = weather_data[['temperature_air']]
            Tair = numpy.array([float(T_data.loc[d]) for d in time_sequence])
        Tcut = numpy.maximum(numpy.zeros_like(Tair), Tair - self.Tbase)
        days = [0] + [((t - time_sequence[0]).total_seconds()+ 3600) / 3600 / 24 for t in time_sequence]
        dt = numpy.diff(days).tolist()
        return numpy.cumsum(Tcut * dt)

# functional call for nodes
Project: seniority_list    Author: rubydatasystems    | Project source | File source
def monotonic(sequence):
    '''test for a monotonically non-decreasing array-like input
    (note the check uses >= 0, so repeated values pass)
    May be used to determine when a no bump,
    no flush routine is no longer required.
    If test is true, and there are no job changes,
    special rights, or furlough recalls,
    then a straight stovepipe job assignment routine may
    be implemented (fast).
    input
        sequence
            array-like input (list or numpy array ok)
    '''
    seq_diff = np.diff(sequence)
    return np.all(seq_diff >= 0)
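
A quick usage sketch:

import numpy as np

print(monotonic([1, 2, 2, 5]))   # True  (non-decreasing)
print(monotonic([3, 1, 2]))      # False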


# GET_MONTH_SLICE
Project: facade-segmentation    Author: jfemiani    | Project source | File source
def _cut_windows_vertically(self, door_top, roof_top, sky_sig, win_strip):
        win_sig = np.percentile(win_strip, 85, axis=1)
        win_sig[sky_sig > 0.5] = 0
        if win_sig.max() > 0:
            win_sig /= win_sig.max()
        win_sig[:roof_top] = 0
        win_sig[door_top:] = 0
        runs, starts, values = run_length_encode(win_sig > 0.5)
        win_heights = runs[values]
        win_tops = starts[values]
        if len(win_heights) > 0:
            win_bottom = win_tops[-1] + win_heights[-1]
            win_top = win_tops[0]
            win_vertical_spacing = np.diff(win_tops).mean() if len(win_tops) > 1 else 0
        else:
            win_bottom = win_top = win_vertical_spacing = -1

        self.top = int(win_top)
        self.bottom = int(win_bottom)
        self.vertical_spacing = int(win_vertical_spacing)
        self.vertical_scores = make_list(win_sig)
        self.heights = np.array(win_heights)
        self.tops = np.array(win_tops)
Project: IDNNs    Author: ravidziv    | Project source | File source
def calc_mean_var_loss(epochsInds,loss_train):
    #Loss train has dimensions #epochs X #batches
    num_of_epochs = loss_train.shape[0]
    #Average over the batches
    loss_train_mean = np.mean(loss_train,1)
    #The diff divided by the sampled indices
    d_mean_loss_to_dt = np.sqrt(np.abs(np.diff(loss_train_mean) / np.diff(epochsInds[:])))
    var_loss = []
    #Go over the epochs
    for epoch_index in range(num_of_epochs):
        #The loss for the specific epoch
        current_loss = loss_train[epoch_index, :]
        #The derivative between the batches
        current_loss_dt = np.diff(current_loss)
        #The mean of this derivative
        average_loss = np.mean(current_loss_dt)
        current_loss_minus_mean = current_loss_dt- average_loss
        #The covariance between the batches
        cov_mat = np.dot(current_loss_minus_mean[:, None], current_loss_minus_mean[None, :])
        # The trace of the cov matrix
        trac_cov = np.trace(cov_mat)
        var_loss.append(trac_cov)
    return np.array(var_loss), d_mean_loss_to_dt
Project: PyMieScatt    Author: bsumlin    | Project source | File source
def find_intersections(A,B):
  arrayMinimum = lambda x1, x2: np.where(x1<x2, x1, x2)
  arrayMaximum = lambda x1, x2: np.where(x1>x2, x1, x2)
  arrayAll = lambda abools: np.dstack(abools).all(axis=2)
  slope = lambda line: (lambda d: d[:,1]/d[:,0])(np.diff(line, axis=0))

  x11, x21 = np.meshgrid(A[:-1, 0], B[:-1, 0])
  x12, x22 = np.meshgrid(A[1:, 0], B[1:, 0])
  y11, y21 = np.meshgrid(A[:-1, 1], B[:-1, 1])
  y12, y22 = np.meshgrid(A[1:, 1], B[1:, 1])

  m1, m2 = np.meshgrid(slope(A), slope(B))
  # Here we use masked arrays to properly treat the rare case where a line segment is perfectly vertical
  _m1 = np.ma.masked_array(m1,m1==-np.inf)
  _m2 = np.ma.masked_array(m2,m2==-np.inf)
  yi = (_m1*(x21-x11-y21/_m2)+y11)/(1-_m1/_m2)
  xi = (yi-y21)/_m2+x21

  xconds = (arrayMinimum(x11, x12) < xi, xi <= arrayMaximum(x11, x12),
            arrayMinimum(x21, x22) < xi, xi <= arrayMaximum(x21, x22) )
  yconds = (arrayMinimum(y11, y12) < yi, yi <= arrayMaximum(y11, y12),
            arrayMinimum(y21, y22) < yi, yi <= arrayMaximum(y21, y22) )

  return xi[arrayAll(xconds)], yi[arrayAll(yconds)]
Project: PyMieScatt    Author: bsumlin    | Project source | File source
def find_intersections(A,B):
  arrayMinimum = lambda x1, x2: np.where(x1<x2, x1, x2)
  arrayMaximum = lambda x1, x2: np.where(x1>x2, x1, x2)
  arrayAll = lambda abools: np.dstack(abools).all(axis=2)
  slope = lambda line: (lambda d: d[:,1]/d[:,0])(np.diff(line, axis=0))

  x11, x21 = np.meshgrid(A[:-1, 0], B[:-1, 0])
  x12, x22 = np.meshgrid(A[1:, 0], B[1:, 0])
  y11, y21 = np.meshgrid(A[:-1, 1], B[:-1, 1])
  y12, y22 = np.meshgrid(A[1:, 1], B[1:, 1])

  m1, m2 = np.meshgrid(slope(A), slope(B))
  # Here we use masked arrays to properly treat the rare case where a line segment is perfectly vertical
  _m1 = np.ma.masked_array(m1,m1==-np.inf)
  _m2 = np.ma.masked_array(m2,m2==-np.inf)
  yi = (_m1*(x21-x11-y21/_m2)+y11)/(1-_m1/_m2)
  xi = (yi-y21)/_m2+x21

  xconds = (arrayMinimum(x11, x12) < xi, xi <= arrayMaximum(x11, x12),
            arrayMinimum(x21, x22) < xi, xi <= arrayMaximum(x21, x22) )
  yconds = (arrayMinimum(y11, y12) < yi, yi <= arrayMaximum(y11, y12),
            arrayMinimum(y21, y22) < yi, yi <= arrayMaximum(y21, y22) )

  return xi[arrayAll(xconds)], yi[arrayAll(yconds)]
Project: pystudio    Author: satorchi    | Project source | File source
def Pbias(self,TES):
    '''
    find the Pbias at 90% Rn
    '''    
    filterinfo=self.filterinfo(TES)
    if filterinfo is None: return None

    Rn_ratio=self.Rn_ratio(TES)
    if not isinstance(Rn_ratio,np.ndarray):return None

    istart,iend=self.selected_iv_curve(TES)

    Rn_ratio=Rn_ratio[istart:iend]
    Ptes=self.Ptes(TES)
    Ptes=Ptes[istart:iend]

    # check that Rn_ratio is increasing
    increasing=np.diff(Rn_ratio).mean()
    if increasing<0:
        Pbias=np.interp(90., np.flip(Rn_ratio,0), np.flip(Ptes,0))
    else:
        Pbias=np.interp(90., Rn_ratio, Ptes)

    return Pbias
Project: pyflux    Author: RJT1990    | Project source | File source
def neg_loglik(self,beta):
        """ Creates the negative log-likelihood of the model

        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        The negative log-likelihood of the model
        """     

        mu, Y = self._model(beta)

        if self.use_ols_covariance is False:
            cm = self.custom_covariance(beta)
        else:
            cm = self.ols_covariance()

        diff = Y.T - mu.T
        ll1 =  -(mu.T.shape[0]*mu.T.shape[1]/2.0)*np.log(2.0*np.pi) - (mu.T.shape[0]/2.0)*np.linalg.slogdet(cm)[1]
        inverse = np.linalg.pinv(cm)

        return var_likelihood(ll1, mu.T.shape[0], diff, inverse)
Project: AlphaPy    Author: ScottFreeLLC    | Project source | File source
def pchange2(f, c1, c2):
    r"""Calculate the percentage change between two variables.

    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.

    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.

    """
    new_column = f[c1] / f[c2] - 1.0
    return new_column


#
# Function diff
#
Project: AlphaPy    Author: ScottFreeLLC    | Project source | File source
def diff(f, c, n = 1):
    r"""Calculate the n-th order difference for the given variable.

    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    n : int
        The number of times that the values are differenced.

    Returns
    -------
    new_column : numpy.ndarray (float)
        The array containing the new feature; note that ``np.diff``
        returns an array ``n`` elements shorter than the input column.

    """
    new_column = np.diff(f[c], n)
    return new_column
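
That length caveat is worth a short illustration (the dataframe below is hypothetical): np.diff shortens the array by n, unlike pandas' own diff, which keeps the length and pads with NaN.

import numpy as np
import pandas as pd

f = pd.DataFrame({'close': [10.0, 11.0, 13.0, 12.0]})
print(diff(f, 'close'))           # [ 1.  2. -1.] -- one element shorter
print(f['close'].diff().values)   # [nan  1.  2. -1.] -- pandas keeps the length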


#
# Function down
#
Project: pytac    Author: willrogers    | Project source | File source
def __init__(self, x, y, post_eng_to_phys=unit_function, pre_phys_to_eng=unit_function):
        """ PChip interpolation for converting between physics and engineering units.

        Args:
            x(list): A list of points on the x axis. These must be in increasing order
                for the interpolation to work. Otherwise, a ValueError is raised.
            y(list): A list of points on the y axis. These must be in increasing or
                decreasing order. Otherwise, a ValueError is raised.

        Raises:
            ValueError: An error occurred when the given y coefficients are
            neither in increasing nor decreasing order.
        """
        super(self.__class__, self).__init__(post_eng_to_phys, pre_phys_to_eng)
        self.x = x
        self.y = y
        self.pp = PchipInterpolator(x, y)

        diff = numpy.diff(y)
        if not ((numpy.all(diff > 0)) or (numpy.all((diff < 0)))):
            raise ValueError("Given coefficients must be monotonically"
                             "decreasing.")
Project: PySAT    Author: USGS-Astrogeology    | Project source | File source
def __init__(self, signal, smoothness_param, deriv_order=1):
        self.y = signal
        assert deriv_order > 0, 'deriv_order must be an int > 0'
        # Compute the fixed derivative of identity (D).
        d = np.zeros(deriv_order * 2 + 1, dtype=int)
        d[deriv_order] = 1
        d = np.diff(d, n=deriv_order)
        n = self.y.shape[0]
        k = len(d)
        s = float(smoothness_param)

        # Here be dragons: essentially we're faking a big banded matrix D,
        # doing s * D.T.dot(D) with it, then taking the upper triangular bands.
        diag_sums = np.vstack([
            np.pad(s * np.cumsum(d[-i:] * d[:i]), ((k - i, 0),), 'constant')
            for i in range(1, k + 1)])
        upper_bands = np.tile(diag_sums[:, -1:], n)
        upper_bands[:, :k] = diag_sums
        for i, ds in enumerate(diag_sums):
            upper_bands[i, -i - 1:] = ds[::-1][:i + 1]
        self.upper_bands = upper_bands
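
The opening impulse/diff trick generates the finite-difference stencil of any order; a standalone check:

import numpy as np

for order in (1, 2, 3):
    d = np.zeros(order * 2 + 1, dtype=int)
    d[order] = 1
    print(order, np.diff(d, n=order))
# 1 [ 1 -1]
# 2 [ 1 -2  1]
# 3 [ 1 -3  3 -1]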
Project: chainladder-python    Author: jbogaardt    | Project source | File source
def __model_form(self, tri_array):
        w = np.nan_to_num(self.weights/tri_array[:,:,:-1]**(2-self.alpha))
        x = np.nan_to_num(tri_array[:,:,:-1]*(tri_array[:,:,1:]*0+1))
        y = np.nan_to_num(tri_array[:,:,1:])
        LDF = np.sum(w*x*y,axis=1)/np.sum(w*x*x,axis=1)
        #Chainladder (alpha=1/delta=1)
        #LDF = np.sum(np.nan_to_num(tri_array[:,:,1:]),axis=1) / np.sum(np.nan_to_num((tri_array[:,:,1:]*0+1)*tri_array[:,:,:-1]),axis=1)
        #print(LDF.shape)
        # assumes no tail
        CDF = np.append(np.cumprod(LDF[:,::-1],axis=1)[:,::-1],np.array([1]*tri_array.shape[0]).reshape(tri_array.shape[0],1),axis=1)    
        latest = np.flip(tri_array,axis=1).diagonal(axis1=1,axis2=2)   
        ults = latest*CDF
        lu = list(ults)
        lc = list(CDF)
        exp_cum_triangle = np.array([np.flipud(lu[num].reshape(tri_array.shape[2],1).dot(1/lc[num].reshape(1,tri_array.shape[2]))) for num in range(tri_array.shape[0])])
        exp_incr_triangle = np.append(exp_cum_triangle[:,:,0,np.newaxis],np.diff(exp_cum_triangle),axis=2)
        return LDF, CDF, ults, exp_incr_triangle
Project: sl-quant    Author: danielzak    | Project source | File source
def init_state(indata, test=False):
    close = indata['close'].values
    diff = np.diff(close)
    diff = np.insert(diff, 0, 0)
    sma15 = SMA(indata, timeperiod=15)
    sma60 = SMA(indata, timeperiod=60)
    rsi = RSI(indata, timeperiod=14)
    atr = ATR(indata, timeperiod=14)

    #--- Preprocess data
    xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))

    xdata = np.nan_to_num(xdata)
    if test == False:
        scaler = preprocessing.StandardScaler()
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
        joblib.dump(scaler, 'data/scaler.pkl')
    elif test == True:
        scaler = joblib.load('data/scaler.pkl')
        # apply the saved training scaler unchanged: transform, not fit_transform
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
    state = xdata[0:1, 0:1, :]

    return state, xdata, close

#Take Action
Project: sl-quant    Author: danielzak    | Project source | File source
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata

#Take Action
Project: sl-quant    Author: danielzak    | Project source | File source
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata

#Take Action
Project: crick    Author: jcrist    | Project source | File source
def test_topk_invariants():
    s = SpaceSaving(capacity=5, dtype='f8')
    s.update(data_f8)
    for k in [0, 5]:
        top = s.topk(k)
        assert isinstance(top, np.ndarray)
        dtype = np.dtype([('item', 'f8'), ('count', 'i8'), ('error', 'i8')])
        assert top.dtype == dtype
        assert len(top) == k
        assert (np.diff(top['count']) <= 0).all()

        top2 = s.topk(k, astuples=True)
        assert len(top2) == k
        np.testing.assert_equal(top['item'], [i.item for i in top2])
        np.testing.assert_equal(top['count'], [i.count for i in top2])
        np.testing.assert_equal(top['error'], [i.error for i in top2])

    with pytest.raises(ValueError):
        s.topk(-1)
Project: sparseMF    Author: jeh0753    | Project source | File source
def plot_change(results):
    ''' This plot shows how each algorithm changes after each iteration. '''
    f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    n_range = np.linspace(0, 50, 11)
    model_names = results[0].keys()
    model_range = range(len(model_names))
    for idx, model in enumerate(model_names): 
        if idx == 0:
            pass
        else:
            ax1.plot(n_range, np.insert(np.absolute(np.diff(results[0][model])), 0, results[0][model][0]), label=model)
            ax2.plot(n_range, np.insert(np.absolute(np.diff(results[1][model])), 0, results[1][model][0]), label=model)
    ax1.set_title('Root Mean Squared Error')
    ax2.set_title('Time in Seconds')
    plt.xlabel('Number of Iterations')
    plt.legend()
    plt.show()
Project: FreeDiscovery    Author: FreeDiscovery    | Project source | File source
def test_search_document_id(app):
    dsid, lsi_id, _, input_ds = get_features_lsi_cached(app, hashed=False)
    parent_id = lsi_id

    max_results = 2
    query_document_id = 3844

    pars = dict(parent_id=parent_id,
                max_results=max_results,
                sort=True,
                query_document_id=query_document_id)

    data = app.post_check(V01 + "/search/", json=pars)
    assert sorted(data.keys()) == ['data', 'pagination']
    data = data['data']
    for row in data:
        assert dict2type(row) == {'score': 'float',
                                  'document_id': 'int'}
    scores = np.array([row['score'] for row in data])
    assert (np.diff(scores) <= 0).all()
    assert len(data) == min(max_results, len(input_ds['dataset']))
    # assert data[0]['document_id'] == query_document_id
    # assert data[0]['score'] >= 0.99
Project: Fluid2d    Author: pvthinker    | Project source | File source
def plot_numvisc(diagfile):
    plt.figure()
    nc = Dataset(diagfile)
    t=nc.variables['t'][:]
    ke=nc.variables['ke'][:]
    dkdt=np.diff(ke)/np.diff(t)
    ens=nc.variables['enstrophy'][:]
    ensm=0.5*(ens[1:]+ens[:-1])
#    deltake[visc,res]=-(ke[-1]-ke[0])

#    deltaens[visc,res]=max(medfilt(ens,21))-ens[5]

    visc_tseries = -dkdt/ensm*4.4*np.pi
    visc_num = max(visc_tseries[t[1:]>0.02])
    #print('N=%4i / visc = %4.1e / num = %4.2e'%(N[res],Kdiff[visc],visc_num[res]))
    plt.semilogy(t[1:],visc_tseries)
    plt.xlabel('time')
    plt.ylabel('viscosity (-(1/2V)dE/dt)')
    plt.grid('on')
    plt.show()
Project: catchy    Author: jvbalen    | Project source | File source
def get_beats(x, sr):
    """Track beats in an audio excerpt, using librosa's standard
        beat tracker.

    Args:
        x (1d-array) audio signal, mono
        sr (int): sample rate

    Returns:
        2d-array: beat times and beat intervals
    """

    _, beat_frames = librosa.beat.beat_track(x, sr=sr)
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)

    t = beat_times[:-1,]
    beat_intervals = np.diff(beat_times)

    return t, beat_intervals
Project: catchy    Author: jvbalen    | Project source | File source
def get_onsets(x, sr):
    """Compute inter-onset intervals (IOI) from audio, using librosa.

    Args:
        x (1d-array) audio signal, mono
        sr (int): sample rate

    Returns:
        2d-array: onset times and IOI
    """

    onset_frames = librosa.onset.onset_detect(x, sr=sr)
    onset_times = librosa.frames_to_time(onset_frames, sr=sr)

    t = onset_times[:-1,]
    onset_intervals = np.diff(onset_times)

    return t, onset_intervals
Project: piphat    Author: bschousek    | Project source | File source
def calcsteps(current_time=current_time, speedparms=speedparms):
    speed=np.linspace(speedparms[0], speedparms[1], len(current_time))
    expected=np.multiply(current_time,speed)
    steps=np.diff(np.floor(expected))
    steplocs=np.where(steps !=0)[0]
    steptimes=current_time[steplocs]
    stepdelta=np.diff(np.insert(steptimes,0,0))
    stepdir=speed[steplocs]>0
    deltap=np.sum(stepdir)-np.sum(np.invert(stepdir))
    full=False

    retval={'steplocs':steplocs,
            'steptimes':current_time[steplocs],
            'speeds': speed[steplocs],
            'stepdelta': stepdelta,
            'stepdir':stepdir,
            'deltap': deltap}
    #logging.debug('steplocs %r' %steplocs)
    #logging.debug('steptimes %r' %current_time[steplocs])
    #logging.debug('speeds %r' %speed[steplocs])

    #logging.debug('retval %r' %retval)
    return retval
    #return steps, current_time
Project: human-rl    Author: gsastry    | Project source | File source
def existing_index_and_interval(self):
        indices = [i for i, f in self.existing_indices_and_files()]
        if len(indices) == 0:
            return None, 1
        elif len(indices) == 1:
            return indices[0], 1
        indices.sort()
        diff = np.diff(indices)
        interval = diff[0]
        return max(indices), interval
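
A sketch of the underlying interval inference (the checkpoint indices are hypothetical):

import numpy as np

indices = sorted([100, 250, 150, 200])   # e.g. indices parsed from checkpoint filenames
interval = np.diff(indices)[0]
print(max(indices), interval)            # 250 50 -- latest index and save interval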
Project: human-rl    Author: gsastry    | Project source | File source
def existing_index_and_interval(self):
        indices = [i for i, f in self.existing_indices_and_files()]
        if len(indices) == 0:
            return None, 1
        elif len(indices) == 1:
            return indices[0], 1
        indices.sort()
        diff = np.diff(indices)
        interval = diff[0]
        return max(indices), interval
Project: human-rl    Author: gsastry    | Project source | File source
def existing_index_and_interval(self):
        indices = [i for i, f in self.existing_indices_and_files()]
        if len(indices) == 0:
            return None, 1
        elif len(indices) == 1:
            return indices[0], 1
        indices.sort()
        diff = np.diff(indices)
        interval = diff[0]
        return max(indices), interval
Project: human-rl    Author: gsastry    | Project source | File source
def existing_index_and_interval(self):
        indices = [i for i, f in self.existing_indices_and_files()]
        if len(indices) == 0:
            return None, 1
        elif len(indices) == 1:
            return indices[0], 1
        indices.sort()
        diff = np.diff(indices)
        interval = diff[0]
        return max(indices), interval
Project: human-rl    Author: gsastry    | Project source | File source
def existing_index_and_interval(self):
        indices = [i for i, f in self.existing_indices_and_files()]
        if len(indices) == 0:
            return None, 1
        elif len(indices) == 1:
            return indices[0], 1
        indices.sort()
        diff = np.diff(indices)
        interval = diff[0]
        return max(indices), interval
Project: lain    Author: llllllllll    | Project source | File source
def hit_object_angles(hit_objects, *, double_time=False, half_time=False):
    """Compute the angle from one hit object to the next in 3d space with time
    along the Z axis.

    Parameters
    ----------
    hit_objects : iterable[HitObject]
        The hit objects to compute the angles about.
    double_time : bool, optional
        Apply double time compression to the Z axis.
    half_time : bool, optional
        Apply half time expansion to the Z axis.

    Returns
    -------
    angles : ndarray[float]
        An array shape (3, len(hit_objects) - 1) of pitch, roll, and yaw
        between each hit object. All angles are measured in radians.
    """
    coords = hit_object_coordinates(
        hit_objects,
        double_time=double_time,
        half_time=half_time,
    )
    diff = np.diff(coords, axis=1)

    # (pitch, roll, yaw) x transitions
    out = np.empty((3, len(hit_objects) - 1), dtype=np.float64)
    np.arctan2(diff[Axis.y], diff[Axis.z], out=out[Angle.pitch])
    np.arctan2(diff[Axis.y], diff[Axis.x], out=out[Angle.roll])
    np.arctan2(diff[Axis.z], diff[Axis.x], out=out[Angle.yaw])

    return out
Project: pdnn    Author: petered    | Project source | File source
def sparse_temporal_forward_pass(inputs, weights, biases = None, scales = None, hidden_activations='relu', output_activations = 'relu', quantization_method = 'herd', rng=None):
    """
    Feed a sequence of inputs into a sparse temporal difference net and get the resulting activations.

    :param inputs: A (n_frames, n_dims_in) array
    :param weights: A list of (n_dim_in, n_dim_out) weight matrices
    :param biases: An optional (len(weights)) list of (w.shape[1]) biases for each weight matrix
    :param scales: An optional (len(weights)) list of (w.shape[0]) scales to scale each layer before rounding.
    :param hidden_activations: Indicates the hidden layer activation function
    :param output_activations: Indicates the output layer activation function
    :return: activations:
        A len(weights)*3+1 list of (n_frames, n_dims) activations.
        Elements [::3] will be a length(w)+1 list containing the input to each rounding unit, and the final output
        Elements [1::3] will be the length(w) rounded "spike" signal.
        Elements [2::3] will be the length(w) inputs to each nonlinearity
    """
    activations = [inputs]
    if biases is None:
        biases = [0]*len(weights)
    if scales is None:
        scales = [1.]*len(weights)
    else:
        assert len(scales) in (len(weights), len(weights)+1)
    real_activations = inputs
    for w, b, k in zip(weights, biases, scales):
        deltas = np.diff(np.insert(real_activations, 0, 0, axis=0), axis=0)  # (n_steps, n_in)
        spikes = quantize_sequence(k*deltas, method=quantization_method, rng=rng)  # (n_steps, n_in)
        delta_inputs = (spikes/k).dot(w)  # (n_steps, n_out)
        cumulated_inputs = np.cumsum(delta_inputs, axis=0)+b  # (n_steps, n_out)
        real_activations = activation_function(cumulated_inputs, output_activations if w is weights[-1] else hidden_activations)  # (n_steps, n_out)
        activations += [spikes, cumulated_inputs, real_activations]
    if len(scales)==len(weights)+1:
        activations[-1]*=scales[-1]
    return activations
Project: psola    Author: jcreinhold    | Project source | File source
def __diff(x):
    """
    First derivative/diff (while keeping same size as input)

    Args:
        x (array): numpy array of data

    Returns:
        dx (array): numpy array of first derivative of data
                      (same size as x)
    """
    dx = np.diff(x)
    dx = np.concatenate(([dx[0]], dx))  # repeat first diff so output len == input len
    return dx
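
With the concatenate fix above, a quick length check (assuming __diff is defined at module level):

import numpy as np

x = np.array([0.0, 1.0, 4.0, 9.0])
print(__diff(x))   # [1. 1. 3. 5.] -- first difference repeated; len(output) == len(input)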
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def _fourierTransform(self, x, y):
        ## Perform fourier transform. If x values are not sampled uniformly,
        ## then use np.interp to resample before taking fft.
        dx = np.diff(x)
        uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))
        if not uniform:
            x2 = np.linspace(x[0], x[-1], len(x))
            y = np.interp(x2, x, y)
            x = x2
        f = np.fft.fft(y) / len(y)
        y = abs(f[1:len(f)//2])
        dt = x[-1] - x[0]
        x = np.linspace(0, 0.5*len(x)/dt, len(y))
        return x, y
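
The uniformity test used above (relative deviation of np.diff from its first element) can be exercised on its own:

import numpy as np

dx = np.diff(np.linspace(0., 1., 5))
print(not np.any(np.abs(dx - dx[0]) > (abs(dx[0]) / 1000.)))   # True -- uniform grid

dx = np.diff(np.array([0., 0.1, 0.3, 0.4]))
print(not np.any(np.abs(dx - dx[0]) > (abs(dx[0]) / 1000.)))   # False -- nonuniform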
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __find_temperature(self,tree,mintemp,maxtemp,tempstep,min_clus):
        num_temp = int(floor(float(maxtemp-mintemp)/tempstep))
        aux = np.diff(tree[:,4])
        aux1 = np.diff(tree[:,5])
        aux2 = np.diff(tree[:,6])
        aux3 = np.diff(tree[:,7])
        temp=0
        for t in range(0,num_temp-1):
            if(aux[t] > min_clus or aux1[t] > min_clus or aux2[t] > min_clus or aux3[t] >min_clus):
                temp=t+1

        if (temp==0 and tree[temp][5]<min_clus):
            temp=1

        return temp
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def test_sorting(self):
        """
        Test if presorting of columns work properly.
        """
        result = self.testIO.get_columns(sorting_columns=0)

        assert len(result) > 0
        assert all(np.diff(result[:, 0]) >= 0)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def _fourierTransform(self, x, y):
        ## Perform fourier transform. If x values are not sampled uniformly,
        ## then use np.interp to resample before taking fft.
        dx = np.diff(x)
        uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))
        if not uniform:
            x2 = np.linspace(x[0], x[-1], len(x))
            y = np.interp(x2, x, y)
            x = x2
        f = np.fft.fft(y) / len(y)
        y = abs(f[1:len(f)//2])
        dt = x[-1] - x[0]
        x = np.linspace(0, 0.5*len(x)/dt, len(y))
        return x, y