Python numpy module: searchsorted() code examples

The following code examples, collected from open-source Python projects, illustrate how to use numpy.searchsorted().
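
As a quick orientation before the project samples: numpy.searchsorted(a, v) returns the indices at which the values v would be inserted into the sorted array a to keep it sorted; side controls tie-breaking and sorter allows searching an unsorted array through its argsort permutation. A minimal sketch:

import numpy as np

a = np.array([1, 3, 3, 7])

# Insertion index that keeps `a` sorted.
print(np.searchsorted(a, 3))                # 1 (side='left': before equal entries)
print(np.searchsorted(a, 3, side='right'))  # 3 (after equal entries)
print(np.searchsorted(a, [0, 4, 9]))        # [0 3 4]

# `sorter` searches an unsorted array via its argsort permutation.
b = np.array([7, 1, 3])
order = np.argsort(b)
print(np.searchsorted(b, 3, sorter=order))  # 1, the position in the sorted view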

Project: MulensModel | Author: rpoleski | project source | file source
def _methods_for_epochs(self):
        """
        for given epochs, decide which methods should be used to
        calculate magnification, but don't run the calculations
        """
        out = [self._default_method] * len(self.times)
        if self._methods_epochs is None:
            return out

        brackets = np.searchsorted(self._methods_epochs, self.times)
        n_max = len(self._methods_epochs)

        out = [self._methods_names[value - 1]
               if (value > 0 and value < n_max) else self._default_method
               for value in brackets]
        return out
Project: prysm | Author: brandondube | project source | file source
def normalize_spectrum(spectrum):
    ''' Normalizes a spectrum to have unit peak within the visible band.
    Args:
        spectrum (`Spectrum`): object with iterable wavelength, value fields.

    Returns:
        `Spectrum`: new spectrum object.

    '''
    wvl, vals = spectrum['wvl'], spectrum['values']
    low, high = np.searchsorted(wvl, 400), np.searchsorted(wvl, 700)
    vis_values_max = vals[low:high].max()
    return {
        'wvl': wvl,
        'values': vals / vis_values_max,
    }
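
A quick way to exercise this band-slicing pattern (a sketch: the wavelength grid and spectrum below are synthetic, not prysm data):

import numpy as np

wvl = np.linspace(300, 800, 501)           # sorted wavelengths in nm
vals = np.exp(-((wvl - 555) / 80) ** 2)    # synthetic spectrum peaking at 555 nm
spec = normalize_spectrum({'wvl': wvl, 'values': vals})
low, high = np.searchsorted(wvl, 400), np.searchsorted(wvl, 700)
assert spec['values'][low:high].max() == 1.0   # unit peak inside the visible band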
Project: lps-anchor-pos-estimator | Author: bitcraze | project source | file source
def setdiff(eq1, eq2):

    eq1, eq2 = eqsize(eq1, eq2)

    c1 = [None] * eq1.size
    c2 = [None] * eq2.size

    for i in range(0, eq1.size):
        c1[i] = hash(eq1[i])

    for i in range(0, eq2.size):
        c2[i] = hash(eq2[i])

    # NB: np.searchsorted assumes the c1 hashes are in sorted order.
    ia = np.delete(np.arange(len(c1)), np.searchsorted(c1, c2))

    ia = (ia[:]).conj().T

    p = eq1[ia]

    return p, ia
Project: MOSFiT | Author: guillochon | project source | file source
def process(self, **kwargs):
        """Process module."""
        self._rest_times = kwargs['rest_times']
        self._rest_t_explosion = kwargs[self.key('resttexplosion')]

        outputs = OrderedDict()
        max_times = max(self._rest_times)
        if max_times > self._rest_t_explosion:
            outputs['dense_times'] = np.unique(
                np.concatenate(([0.0], [
                    x + self._rest_t_explosion
                    for x in np.logspace(
                        self.L_T_MIN,
                        np.log10(max_times - self._rest_t_explosion),
                        num=self._n_times)
                ], self._rest_times)))
        else:
            outputs['dense_times'] = np.array(self._rest_times)
        outputs['dense_indices'] = np.searchsorted(
            outputs['dense_times'], self._rest_times)
        return outputs
Project: MOSFiT | Author: guillochon | project source | file source
def mm83(self, nh, waves):
        """X-ray extinction in the ISM from Morisson & McCammon 1983."""
        y = np.array([self.H_C_CGS / (x * self.ANG_CGS * self.KEV_CGS)
                      for x in waves])
        i = np.array([np.searchsorted(self._mm83[:, 0], x) - 1 for x in y])
        al = [1.0e-24 * (self._mm83[x, 1] + self._mm83[x, 2] * y[j] +
                         self._mm83[x, 3] * y[j] ** 2) / y[j] ** 3
              for j, x in enumerate(i)]
        # For less than 0.03 keV assume cross-section scales as E^-3.
        # http://ned.ipac.caltech.edu/level5/Madau6/Madau1_2.html
        # See also Rumph, Boyer, & Vennes 1994.
        al = [al[j] if x < self._min_xray
              else self._almin * (self._min_xray / x) ** 3
              for j, x in enumerate(y)]
        al = [al[j] if x > self._max_xray
              else self._almax * (self._max_xray / x) ** 3
              for j, x in enumerate(y)]
        return nh * np.array(al)
Project: hienoi | Author: christophercrouzet | project source | file source
def get_particle(self, id):
        """Retrieve a particle.

        Parameters
        ----------
        id : int
            ID of the particle to retrieve.

        Returns
        -------
        nani.Particle
            The particle found.
        """
        # PRECONDITION: `self._array.data` sorted by id.
        id = self._ATTR_ID_NUMPY_TYPE(id)
        idx = numpy.searchsorted(self._array.data['id'], id)
        if idx < len(self._array) and self._array.data[idx]['id'] == id:
            return self._nani.element_view(self._array.data[idx])

        raise ValueError("No particle found with ID '%d'." % (id,))
Project: MIT-Thesis | Author: alec-heif | project source | file source
def __getitem__(self, index):
        inds = self.indices
        vals = self.values
        if not isinstance(index, int):
            raise TypeError(
                "Indices must be of type integer, got type %s" % type(index))

        if index >= self.size or index < -self.size:
            raise IndexError("Index %d out of bounds." % index)
        if index < 0:
            index += self.size

        if (inds.size == 0) or (index > inds.item(-1)):
            return 0.

        insert_index = np.searchsorted(inds, index)
        row_ind = inds[insert_index]
        if row_ind == index:
            return vals[insert_index]
        return 0.
Project: MIT-Thesis | Author: alec-heif | project source | file source
def __getitem__(self, indices):
        i, j = indices
        if i < 0 or i >= self.numRows:
            raise IndexError("Row index %d is out of range [0, %d)"
                             % (i, self.numRows))
        if j < 0 or j >= self.numCols:
            raise IndexError("Column index %d is out of range [0, %d)"
                             % (j, self.numCols))

        # If a CSR matrix is given, then the row index should be searched
        # for in ColPtrs, and the column index should be searched for in the
        # corresponding slice obtained from rowIndices.
        if self.isTransposed:
            j, i = i, j

        colStart = self.colPtrs[j]
        colEnd = self.colPtrs[j + 1]
        nz = self.rowIndices[colStart: colEnd]
        ind = np.searchsorted(nz, i) + colStart
        if ind < colEnd and self.rowIndices[ind] == i:
            return self.values[ind]
        else:
            return 0.0
Project: django-corenlp | Author: arunchaganty | project source | file source
def add(self, arr):
        if not isinstance(arr, np.ndarray):
            arr = np.array(arr)
        arr = arr.flatten()

        self.min = min(self.min, arr.min())
        self.max = max(self.max, arr.max())
        self.sum += arr.sum()
        self.num += len(arr)
        self.sum_squares += (arr ** 2).sum()

        indices = np.searchsorted(self.bucket_limits, arr, side='right')
        new_counts = np.bincount(indices, minlength=self.buckets.shape[0])
        if new_counts.shape[0] > self.buckets.shape[0]:
            # This should only happen with nans and extremely large values
            assert new_counts.shape[0] == self.buckets.shape[0] + 1, new_counts.shape
            new_counts = new_counts[:self.buckets.shape[0]]
        self.buckets += new_counts
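
The searchsorted/bincount pair above is a compact histogram update; a minimal sketch of the same idea (the bucket edges are made up):

import numpy as np

bucket_limits = np.array([0.0, 1.0, 2.0, np.inf])   # upper bucket edges
arr = np.array([0.5, 1.5, 1.7, 3.0])
indices = np.searchsorted(bucket_limits, arr, side='right')
counts = np.bincount(indices, minlength=len(bucket_limits))
print(counts)   # [0 1 2 1]: one value in (0, 1], two in (1, 2], one in (2, inf]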
Project: HARK | Author: econ-ark | project source | file source
def _evaluate(self,x,y):
        '''
        Returns the level of the interpolated function at each value in x,y.
        Only called internally by HARKinterpolator2D.__call__ (etc).
        '''
        if _isscalar(x):
            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
            alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
            f = (1-alpha)*self.xInterpolators[y_pos-1](x) + alpha*self.xInterpolators[y_pos](x)
        else:
            m = len(x)
            y_pos = np.searchsorted(self.y_list,y)
            y_pos[y_pos > self.y_n-1] = self.y_n-1
            y_pos[y_pos < 1] = 1
            f = np.zeros(m) + np.nan
            if y.size > 0:
                for i in xrange(1,self.y_n):
                    c = y_pos == i
                    if np.any(c):
                        alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
                        f[c] = (1-alpha)*self.xInterpolators[i-1](x[c]) + alpha*self.xInterpolators[i](x[c]) 
        return f
Project: HARK | Author: econ-ark | project source | file source
def _derX(self,x,y):
        '''
        Returns the derivative with respect to x of the interpolated function
        at each value in x,y. Only called internally by HARKinterpolator2D.derivativeX.
        '''
        if _isscalar(x):
            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
            alpha = (y - self.y_list[y_pos-1])/(self.y_list[y_pos] - self.y_list[y_pos-1])
            dfdx = (1-alpha)*self.xInterpolators[y_pos-1]._der(x) + alpha*self.xInterpolators[y_pos]._der(x)
        else:
            m = len(x)
            y_pos = np.searchsorted(self.y_list,y)
            y_pos[y_pos > self.y_n-1] = self.y_n-1
            y_pos[y_pos < 1] = 1
            dfdx = np.zeros(m) + np.nan
            if y.size > 0:
                for i in xrange(1,self.y_n):
                    c = y_pos == i
                    if np.any(c):
                        alpha = (y[c] - self.y_list[i-1])/(self.y_list[i] - self.y_list[i-1])
                        dfdx[c] = (1-alpha)*self.xInterpolators[i-1]._der(x[c]) + alpha*self.xInterpolators[i]._der(x[c])
        return dfdx
Project: HARK | Author: econ-ark | project source | file source
def _derY(self,x,y):
        '''
        Returns the derivative with respect to y of the interpolated function
        at each value in x,y. Only called internally by HARKinterpolator2D.derivativeY.
        '''
        if _isscalar(x):
            y_pos = max(min(np.searchsorted(self.y_list,y),self.y_n-1),1)
            dfdy = (self.xInterpolators[y_pos](x) - self.xInterpolators[y_pos-1](x))/(self.y_list[y_pos] - self.y_list[y_pos-1])
        else:
            m = len(x)
            y_pos = np.searchsorted(self.y_list,y)
            y_pos[y_pos > self.y_n-1] = self.y_n-1
            y_pos[y_pos < 1] = 1
            dfdy = np.zeros(m) + np.nan
            if y.size > 0:
                for i in xrange(1,self.y_n):
                    c = y_pos == i
                    if np.any(c):
                        dfdy[c] = (self.xInterpolators[i](x[c]) - self.xInterpolators[i-1](x[c]))/(self.y_list[i] - self.y_list[i-1])
        return dfdy
Project: HARK | Author: econ-ark | project source | file source
def _evaluate(self,x,y,z):
        '''
        Returns the level of the interpolated function at each value in x,y,z.
        Only called internally by HARKinterpolator3D.__call__ (etc).
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
            f = (1-alpha)*self.xyInterpolators[z_pos-1](x,y) + alpha*self.xyInterpolators[z_pos](x,y)
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            f = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
                        f[c] = (1-alpha)*self.xyInterpolators[i-1](x[c],y[c]) + alpha*self.xyInterpolators[i](x[c],y[c]) 
        return f
Project: HARK | Author: econ-ark | project source | file source
def _derX(self,x,y,z):
        '''
        Returns the derivative with respect to x of the interpolated function
        at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeX.
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
            dfdx = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeX(x,y) + alpha*self.xyInterpolators[z_pos].derivativeX(x,y)
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            dfdx = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
                        dfdx[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeX(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeX(x[c],y[c]) 
        return dfdx
Project: HARK | Author: econ-ark | project source | file source
def _derY(self,x,y,z):
        '''
        Returns the derivative with respect to y of the interpolated function
        at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeY.
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            alpha = (z - self.z_list[z_pos-1])/(self.z_list[z_pos] - self.z_list[z_pos-1])
            dfdy = (1-alpha)*self.xyInterpolators[z_pos-1].derivativeY(x,y) + alpha*self.xyInterpolators[z_pos].derivativeY(x,y)
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            dfdy = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        alpha = (z[c] - self.z_list[i-1])/(self.z_list[i] - self.z_list[i-1])
                        dfdy[c] = (1-alpha)*self.xyInterpolators[i-1].derivativeY(x[c],y[c]) + alpha*self.xyInterpolators[i].derivativeY(x[c],y[c]) 
        return dfdy
Project: HARK | Author: econ-ark | project source | file source
def _derZ(self,x,y,z):
        '''
        Returns the derivative with respect to z of the interpolated function
        at each value in x,y,z. Only called internally by HARKinterpolator3D.derivativeZ.
        '''
        if _isscalar(x):
            z_pos = max(min(np.searchsorted(self.z_list,z),self.z_n-1),1)
            dfdz = (self.xyInterpolators[z_pos].derivativeX(x,y) - self.xyInterpolators[z_pos-1].derivativeX(x,y))/(self.z_list[z_pos] - self.z_list[z_pos-1])
        else:
            m = len(x)
            z_pos = np.searchsorted(self.z_list,z)
            z_pos[z_pos > self.z_n-1] = self.z_n-1
            z_pos[z_pos < 1] = 1
            dfdz = np.zeros(m) + np.nan
            if x.size > 0:
                for i in xrange(1,self.z_n):
                    c = z_pos == i
                    if np.any(c):
                        dfdz[c] = (self.xyInterpolators[i](x[c],y[c]) - self.xyInterpolators[i-1](x[c],y[c]))/(self.z_list[i] - self.z_list[i-1])
        return dfdz
Project: HARK | Author: econ-ark | project source | file source
def simBirth(self,which_agents):
        '''
        Makes new Markov consumer by drawing initial normalized assets, permanent income levels, and
        discrete states. Calls IndShockConsumerType.simBirth, then draws from initial Markov distribution.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        '''
        IndShockConsumerType.simBirth(self,which_agents) # Get initial assets and permanent income
        N = np.sum(which_agents)
        base_draws = drawUniform(N,seed=self.RNG.randint(0,2**31-1))
        Cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
        self.MrkvNow[which_agents] = np.searchsorted(Cutoffs,base_draws).astype(int)
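
The last two lines are inverse-CDF sampling of a discrete distribution: searchsorted maps each uniform draw to the first cumulative cutoff above it. A standalone sketch:

import numpy as np

rng = np.random.RandomState(0)
probs = np.array([0.2, 0.5, 0.3])         # made-up initial distribution
cutoffs = np.cumsum(probs)                # [0.2, 0.7, 1.0]
draws = rng.uniform(size=10000)
states = np.searchsorted(cutoffs, draws)  # state 0, 1, or 2 per draw
print(np.bincount(states) / 10000.0)      # roughly [0.2, 0.5, 0.3]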
Project: brainiak | Author: brainiak | project source | file source
def ecdf(x):
    """Empirical cumulative distribution function

    Given a 1D array of values, returns a function f(q) that outputs the
    fraction of values less than or equal to q.

    Parameters
    ----------
    x : 1D array
        values for which to compute CDF

    Returns
    ----------
    ecdf_fun: Callable[[float], float]
        function that returns the value of the CDF at a given point
    """
    xp = np.sort(x)
    yp = np.arange(len(xp) + 1) / len(xp)

    def ecdf_fun(q):
        return yp[np.searchsorted(xp, q, side="right")]

    return ecdf_fun
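
Usage is straightforward; with side='right', values equal to a data point count as "less than or equal":

import numpy as np

f = ecdf(np.array([3, 1, 2]))
print(f(0))   # 0.0, no values <= 0
print(f(2))   # 0.666..., two of three values <= 2
print(f(5))   # 1.0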
Project: textar | Author: datosgobar | project source | file source
def make_classifier(self, name, ids, labels):
        """Entrenar un clasificador SVM sobre los textos cargados.

        Crea un clasificador que se guarda en el objeto bajo el nombre `name`.

        Args:
            name (str): Nombre para el clasidicador.
            ids (list): Se espera una lista de N ids de textos ya almacenados
                en el TextClassifier.
            labels (list): Se espera una lista de N etiquetas. Una por cada id
                de texto presente en ids.
        Nota:
            Usa el clasificador de `Scikit-learn <http://scikit-learn.org/>`_
        """
        if not all(np.in1d(ids, self.ids)):
            raise ValueError("Hay ids de textos que no se encuentran \
                              almacenados.")
        setattr(self, name, SGDClassifier())
        classifier = getattr(self, name)
        indices = np.searchsorted(self.ids, ids)
        classifier.fit(self.tfidf_mat[indices, :], labels)
Project: uncover-ml | Author: GeoscienceAustralia | project source | file source
def _global_lonlat2pix(self, lonlat):
        x = np.searchsorted(self._coords_x, lonlat[:, 0], side='right') - 1
        x = x.astype(int)
        ycoords = self._coords_y
        y = np.searchsorted(ycoords, lonlat[:, 1], side='right') - 1
        y = y.astype(int)

        # We want the *closed* interval, which means moving
        # points on the end back by 1
        on_end_x = lonlat[:, 0] == self._coords_x[-1]
        on_end_y = lonlat[:, 1] == self._coords_y[-1]
        x[on_end_x] -= 1
        y[on_end_y] -= 1
        if (not all(np.logical_and(x >= 0, x < self._full_res[0]))) or \
                (not all(np.logical_and(y >= 0, y < self._full_res[1]))):
            raise ValueError("Queried location is not "
                             "in the image {}!".format(self.source._filename))

        result = np.concatenate((x[:, np.newaxis], y[:, np.newaxis]), axis=1)
        return result

Project: Dragonfly | Author: duaneloh | project source | file source
def next_frame(self, event=None):
        num = int(self.numstr.text())
        cnum = self.class_num.checkedId() - 1
        if cnum == -1:
            num += 1
        else:
            points = np.where(self.classes.key_pos == cnum)[0]
            index = np.searchsorted(points, num, side='left')
            if num in points:
                index += 1
            if index > len(points) - 1:
                index = len(points) - 1
            num = points[index]

        if num < self.parent.num_frames:
            self.numstr.setText(str(num))
            self.plot_frame()
Project: IDNNs | Author: ravidziv | project source | file source
def update_line_each_neuron(num, print_loss, Ix, axes, Iy, train_data, accuracy_test, epochs_bins, loss_train_data, loss_test_data, colors, epochsInds,
                            font_size = 18, axis_font = 16, x_lim = [0,12.2], y_lim=[0, 1.08],x_ticks = [], y_ticks = []):
    """Update the figure of the infomration plane for the movie"""
    #Print the line between the points
    axes[0].clear()
    if len(axes)>1:
        axes[1].clear()
    #Print the points
    for layer_num in range(Ix.shape[2]):
        for net_ind in range(Ix.shape[0]):
            axes[0].scatter(Ix[net_ind,num, layer_num], Iy[net_ind,num, layer_num], color = colors[layer_num], s = 35,edgecolors = 'black',alpha = 0.85)
    title_str = 'Information Plane - Epoch number - ' + str(epochsInds[num])
    utils.adjustAxes(axes[0], axis_font, title_str, x_ticks, y_ticks, x_lim, y_lim, set_xlabel=True, set_ylabel=True,
                     x_label='$I(X;T)$', y_label='$I(T;Y)$')
    #Print the loss function and the error
    if len(axes)>1:
        axes[1].plot(epochsInds[:num], 1 - np.mean(accuracy_test[:, :num], axis=0), color='g')
        if print_loss:
            axes[1].plot(epochsInds[:num], np.mean(loss_test_data[:, :num], axis=0), color='y')
        nearest_val = np.searchsorted(epochs_bins, epochsInds[num], side='right')
        axes[1].set_xlim([0, epochs_bins[nearest_val]])
        axes[1].legend(('Accuracy', 'Loss Function'), loc='best')
Project: catalyst | Author: enigmampc | project source | file source
def minutes_window(self, start_dt, count):
        start_dt_nanos = start_dt.value
        all_minutes_nanos = self._trading_minutes_nanos
        start_idx = all_minutes_nanos.searchsorted(start_dt_nanos)

        # searchsorted finds the index of the minute **on or after** start_dt.
        # If the latter, push back to the prior minute.
        if all_minutes_nanos[start_idx] != start_dt_nanos:
            start_idx -= 1

        if start_idx < 0 or start_idx >= len(all_minutes_nanos):
            raise KeyError("Can't start minute window at {}".format(start_dt))

        end_idx = start_idx + count

        if start_idx > end_idx:
            return self.all_minutes[(end_idx + 1):(start_idx + 1)]
        else:
            return self.all_minutes[start_idx:end_idx]
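
The comment above is the key subtlety: searchsorted returns the index of the element on or after the probe, so stepping back by one recovers the element at or before it. In miniature:

import numpy as np

minutes = np.array([100, 160, 220])   # e.g. minute timestamps in nanoseconds
idx = minutes.searchsorted(190)       # 2, the minute *after* 190
if minutes[idx] != 190:
    idx -= 1                          # 1, the minute at or before 190
print(minutes[idx])                   # 160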
Project: importance-sampling | Author: idiap | project source | file source
def _encode(self, s, V, context):
        """
        Arguments
        ----------
            s: Sentence as a list of strings
            V: Vocabulary as a np array of strings
            context: The maximum length of previous words to include
        """
        idxs = np.searchsorted(V, s)
        x = np.zeros((len(s)-1, context), dtype=np.int32)
        y = np.zeros((len(s)-1, 1), np.int32)
        for i in range(1, len(s)):
            x[i-1, :i] = idxs[:i][-context:] + 1  # 0 means missing value
            y[i-1] = idxs[i]

        return x, y
Project: jwalk | Author: jwplayer | project source | file source
def encode_edges(edges, nodes):
    """Encode data with dictionary

    Args:
        edges (np.ndarray): np array of the form [node1, node2].
        nodes (np.array): list of unique nodes

    Returns:
        np.ndarray: relabeled edges

    Examples:
        >>> import numpy as np
        >>> edges = np.array([['A', 'B'], ['A', 'C']])
        >>> nodes = np.array(['C', 'B', 'A'])
        >>> print(encode_edges(edges, nodes))
        [[2 1]
         [2 0]]
    """
    sidx = nodes.argsort()
    relabeled_edges = sidx[np.searchsorted(nodes, edges, sorter=sidx)]
    return relabeled_edges
Project: yt | Author: yt-project | project source | file source
def get_spectrum(self, kT):
        """
        Get the thermal emission spectrum given a temperature *kT* in keV. 
        """
        cspec_l = np.zeros(self.nchan)
        mspec_l = np.zeros(self.nchan)
        cspec_r = np.zeros(self.nchan)
        mspec_r = np.zeros(self.nchan)
        tindex = np.searchsorted(self.Tvals, kT)-1
        if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
            return YTArray(cspec_l, "cm**3/s"), YTArray(mspec_l, "cm**3/s")
        dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
        # First do H,He, and trace elements
        for elem in self.cosmic_elem:
            cspec_l += self._make_spectrum(kT, elem, tindex+2)
            cspec_r += self._make_spectrum(kT, elem, tindex+3)
        # Next do the metals
        for elem in self.metal_elem:
            mspec_l += self._make_spectrum(kT, elem, tindex+2)
            mspec_r += self._make_spectrum(kT, elem, tindex+3)
        cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s")
        metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s")
        return cosmic_spec, metal_spec
Project: babusca | Author: georglind | project source | file source
def stateindex(state, hashes, sorter):
        """
        Converts state to hash and searches for the hash among hashes,
        which are sorted by the sorter list.

        Parameters
        ----------
        state : ndarray
            An array of one or more states
        hashes : ndarray
            List of hashes so search among
        sorter : ndarray
            Sorting indicies which sorts hashes
            (generated from Basis.argsort).
        """
        key = Basis.hash(state)
        return sorter[np.searchsorted(hashes, key, sorter=sorter)]
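
A standalone sketch of the sorter pattern used above (the hashes are arbitrary integers):

import numpy as np

hashes = np.array([42, 7, 19])   # unsorted hash table
sorter = np.argsort(hashes)      # [1, 2, 0]: indices that would sort `hashes`
key = 19
print(sorter[np.searchsorted(hashes, key, sorter=sorter)])   # 2, the index of 19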
Project: DeepConvSep | Author: MTG | project source | file source
def getNextIndex(self):
        """
        Returns how many batches/sequences to load from each .data file
        """
        target_value = (self.scratch_index+1)*(self.batch_memory*self.batch_size)
        idx_target = np.searchsorted(self.num_points,target_value, side='right')
        if target_value>self.num_points[-1] or idx_target>=len(self.num_points):
            idx_target = idx_target - 2
            target_value = self.num_points[idx_target]
            self.idxend = self.num_points[idx_target] - self.num_points[idx_target-1]
            self.nindex = idx_target
        else:
            while target_value<=self.num_points[idx_target]:
                idx_target = idx_target - 1
            self.idxend = target_value - self.num_points[idx_target]
            self.nindex = idx_target
Project: jaylyrics_generation_tensorflow | Author: hundred06 | project source | file source
def random_pick(p,word,sampling_type):
    def weighted_pick(weights):
        t = np.cumsum(weights)
        s = np.sum(weights)
        return(int(np.searchsorted(t, np.random.rand(1)*s)))

    if sampling_type == 'argmax':
        sample = np.argmax(p)
    elif sampling_type == 'weighted': 
        sample = weighted_pick(p)
    elif sampling_type == 'combined':
        if word == ' ':
            sample = weighted_pick(p)
        else:
            sample = np.argmax(p)
    return sample


# test code
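A quick check of random_pick (a sketch; p is assumed to be a 1-D probability vector):

import numpy as np

p = np.array([0.1, 0.6, 0.3])
picks = [random_pick(p, ' ', 'weighted') for _ in range(10000)]
print(np.bincount(picks) / 10000.0)     # roughly [0.1, 0.6, 0.3]
print(random_pick(p, 'x', 'combined'))  # 1, the argmax, since word != ' '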
Project: Thrifty | Author: swkrueger | project source | file source
def reldist_linpol(tx_soa, beacon_soa):
    # Interpolate between two nearest beacon samples
    beacon_rx0, beacon_rx1 = beacon_soa[:, 0], beacon_soa[:, 1]
    tx_rx0, tx_rx1 = tx_soa[:, 0], tx_soa[:, 1]

    high_idx = np.searchsorted(beacon_rx0, tx_rx0)
    low_idx = high_idx - 1
    length = len(beacon_soa[:, 0])
    if high_idx[-1] >= length:
        high_idx[-1] = length - 1
    if low_idx[0] < 0:
        low_idx[0] = 0

    weight = ((tx_rx0 - beacon_rx0[low_idx]) /
              (beacon_rx0[high_idx] - beacon_rx0[low_idx]))
    weight[np.isinf(weight)] = 1  # guard against division by zero at the boundary
    # Reldist in samples
    reldist = (tx_rx1 - (beacon_rx1[low_idx] * (1-weight) +
                         beacon_rx1[high_idx] * weight))  # / 2.0
    return reldist
Project: P4J | Author: phuijse | project source | file source
def weighted_quantile(x, weights, quantile):
    I = np.argsort(x)
    sort_x = x[I]
    sort_w = weights[I]
    acum_w = np.add.accumulate(sort_w)
    norm_w = (acum_w - 0.5*sort_w)/acum_w[-1] 
    interpq = np.searchsorted(norm_w, [quantile])[0] 
    if interpq == 0:
        return sort_x[0]
    elif interpq == len(x):
        return sort_x[-1]
    else:
        tmp1 = (norm_w[interpq] - quantile)/(norm_w[interpq] - norm_w[interpq-1])
        tmp2 = (quantile - norm_w[interpq-1])/(norm_w[interpq] - norm_w[interpq-1])
        assert tmp1>=0 and tmp2>=0 and tmp1<=1 and tmp2<=1 
        return sort_x[interpq-1]*tmp1 + sort_x[interpq]*tmp2
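
A quick sanity check: with uniform weights the result matches the ordinary quantile up to interpolation:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
w = np.ones_like(x)
print(weighted_quantile(x, w, 0.5))   # 3.0, the median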
Project: auto_ml | Author: ClimbsRocks | project source | file source
def transform(self, y):
        """Transform labels to normalized encoding.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        """
        y = column_or_1d(y, warn=True)

        classes = np.unique(y)
        if len(np.intersect1d(classes, self.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.classes_)
            self.classes_ = np.hstack((self.classes_, diff))
        return np.searchsorted(self.classes_, y)
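
Note that searchsorted is only a valid encoder while classes_ stays sorted, which the hstack above does not guarantee for unseen labels that sort below existing ones; in the common case it matches scikit-learn's LabelEncoder.transform:

import numpy as np

classes = np.array(['a', 'b', 'c'])   # must remain sorted
y = np.array(['b', 'a', 'c', 'b'])
print(np.searchsorted(classes, y))    # [1 0 2 1]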
Project: pyxsim | Author: jzuhone | project source | file source
def get_spectrum(self, kT):
        """
        Get the thermal emission spectrum given a temperature *kT* in keV. 
        """
        tindex = np.searchsorted(self.Tvals, kT)-1
        if tindex >= self.Tvals.shape[0]-1 or tindex < 0:
            return (YTArray(np.zeros(self.nchan), "cm**3/s"),)*2
        dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
        cspec_l = self.cosmic_spec[tindex, :]
        mspec_l = self.metal_spec[tindex, :]
        cspec_r = self.cosmic_spec[tindex+1, :]
        mspec_r = self.metal_spec[tindex+1, :]
        cosmic_spec = cspec_l*(1.-dT)+cspec_r*dT
        metal_spec = mspec_l*(1.-dT)+mspec_r*dT
        var_spec = None
        if self.var_spec is not None:
            vspec_l = self.var_spec[:, tindex, :]
            vspec_r = self.var_spec[:, tindex+1, :]
            var_spec = vspec_l*(1.-dT) + vspec_r*dT
        return cosmic_spec, metal_spec, var_spec
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia | project source | file source
def test_resample_group_info(self):  # GH10914
        for n, k in product((10000, 100000), (10, 100, 1000)):
            dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
            ts = Series(np.random.randint(0, n // k, n).astype('int64'),
                        index=np.random.choice(dr, n))

            left = ts.resample('30T').nunique()
            ix = date_range(start=ts.index.min(), end=ts.index.max(),
                            freq='30T')

            vals = ts.values
            bins = np.searchsorted(ix.values, ts.index, side='right')

            sorter = np.lexsort((vals, bins))
            vals, bins = vals[sorter], bins[sorter]

            mask = np.r_[True, vals[1:] != vals[:-1]]
            mask |= np.r_[True, bins[1:] != bins[:-1]]

            arr = np.bincount(bins[mask] - 1,
                              minlength=len(ix)).astype('int64', copy=False)
            right = Series(arr, index=ix)

            assert_series_equal(left, right)
Project: QuantEcon.lectures.code | Author: QuantEcon | project source | file source
def find_cutoff_rule(self, J):
        """
        This function takes a value function and returns the corresponding
        cutoffs of where you transition between continue and choosing a
        specific model
        """
        payoff_choose_f0 = self.payoff_choose_f0
        payoff_choose_f1 = self.payoff_choose_f1
        m, pgrid = self.m, self.pgrid

        # Evaluate cost at all points on grid for choosing a model
        p_c_0 = payoff_choose_f0(pgrid)
        p_c_1 = payoff_choose_f1(pgrid)

        # The cutoff points can be found by differencing these costs with
        # the Bellman equation (J is always less than or equal to p_c_i)
        lb = pgrid[np.searchsorted(p_c_1 - J, 1e-10) - 1]
        ub = pgrid[np.searchsorted(J - p_c_0, -1e-10)]

        return (lb, ub)
Project: spyking-circus | Author: spyking-circus | project source | file source
def _get_streams_index_by_time(self, local_time):
        if self.is_stream:
            cidx  = numpy.searchsorted(self._times, local_time, 'right') - 1
            return cidx
Project: spyking-circus | Author: spyking-circus | project source | file source
def is_first_chunk(self, idx, nb_chunks):

        if self.is_stream:
            cidx = numpy.searchsorted(self._chunks_in_sources, idx, 'right') - 1
            idx -= self._chunks_in_sources[cidx]
            if idx == 0:
                return True
        else:
            if idx == 0:
                return True
        return False
Project: spyking-circus | Author: spyking-circus | project source | file source
def get_data(self, idx, chunk_size, padding=(0, 0), nodes=None):

        if self.is_stream:
            cidx = numpy.searchsorted(self._chunks_in_sources, idx, 'right') - 1
            idx -= self._chunks_in_sources[cidx]
            return self._sources[cidx].read_chunk(idx, chunk_size, padding, nodes), self._sources[cidx].t_start + idx*chunk_size
        else:
            return self.read_chunk(idx, chunk_size, padding, nodes), self.t_start + idx*chunk_size
Project: cellranger | Author: 10XGenomics | project source | file source
def find_within_ordmag(x, baseline_idx):
    x_ascending = np.sort(x)
    baseline = x_ascending[-baseline_idx]
    cutoff = max(1, round(0.1*baseline))
    # Return the index corresponding to the cutoff in descending order
    return len(x) - np.searchsorted(x_ascending, cutoff)
Project: wmd-relax | Author: src-d | project source | file source
def _common_vocabulary_batch(self, words1, weights1, i2):
        words2, weights2 = self._get_vocabulary(i2)
        joint, index = numpy.unique(numpy.concatenate((words1, words2)),
                                    return_index=True)
        nw1 = numpy.zeros(len(joint), dtype=numpy.float32)
        cmp = index < len(words1)
        nw1[numpy.nonzero(cmp)] = weights1[index[cmp]]
        nw2 = numpy.zeros(len(joint), dtype=numpy.float32)
        nw2[numpy.searchsorted(joint, words2)] = weights2
        return joint, nw1, nw2
Project: NeoAnalysis | Author: neoanalysis | project source | file source
def _get_selected_ids(self, gid, id_column, time_column, t_start, t_stop,
                          time_unit, data):
        """
        Calculates the data range to load depending on the selected gid
        and the provided time range (t_start, t_stop)

        gid: int, gid to be loaded.
        id_column: int, id of the column containing gids.
        time_column: int, id of the column containing times.
        t_start: pq.quantity.Quantity, start of the time range to load.
        t_stop: pq.quantity.Quantity, stop of the time range to load.
        time_unit: pq.quantity.Quantity, time unit of the data to load.
        data: numpy array, data to load.

        Returns
        the start and stop indices of the selected data range
        """
        gid_ids = np.array([0, data.shape[0]])
        if id_column is not None:
            gid_ids = np.array([np.searchsorted(data[:, 0], gid, side='left'),
                                np.searchsorted(data[:, 0], gid, side='right')])
        gid_data = data[gid_ids[0]:gid_ids[1], :]

        # select only requested time range
        id_shifts = np.array([0, 0])
        if time_column is not None:
            id_shifts[0] = np.searchsorted(gid_data[:, 1],
                                           t_start.rescale(
                                                   time_unit).magnitude,
                                           side='left')
            id_shifts[1] = (np.searchsorted(gid_data[:, 1],
                                            t_stop.rescale(
                                                    time_unit).magnitude,
                                            side='left') - gid_data.shape[0])

        selected_ids = gid_ids + id_shifts
        return selected_ids
Project: dc_stat_think | Author: justinbois | project source | file source
def test_ecdf_formal(x, data):
    correct = np.searchsorted(np.sort(data), x, side='right') / len(data)
    assert np.allclose(dcst.ecdf_formal(x, data), correct, atol=atol,
                       equal_nan=True)