Python numpy module: cumsum() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.cumsum().
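
Before the project excerpts, a minimal reference sketch (NumPy only; values are illustrative) of what numpy.cumsum() computes:

import numpy as np

a = np.array([1, 2, 3, 4])
print(np.cumsum(a))            # [ 1  3  6 10] -- running total of the flattened input

m = np.array([[1, 2], [3, 4]])
print(np.cumsum(m, axis=0))    # [[1 2] [4 6]] -- accumulate down each column
print(np.cumsum(m, axis=1))    # [[1 3] [3 7]] -- accumulate across each row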

Project: dask_gdf    Author: gpuopenanalytics
def reset_index(self):
        """Reset index to range based
        """
        dfs = self.to_delayed()
        sizes = np.asarray(compute(*map(delayed(len), dfs)))
        prefixes = np.zeros_like(sizes)
        prefixes[1:] = np.cumsum(sizes[:-1])

        @delayed
        def fix_index(df, startpos):
            return df.set_index(np.arange(start=startpos,
                                          stop=startpos + len(df),
                                          dtype=np.intp))

        outdfs = [fix_index(df, startpos)
                  for df, startpos in zip(dfs, prefixes)]
        return from_delayed(outdfs)
Project: pdnn    Author: petered
def scalar_weight_updates(xc, ec, kp, kd):

    tx_last = 0
    te_last = 0
    x_last = 0
    e_last = 0
    n_samples = xc.shape[0]
    dw = np.zeros(n_samples)
    r = kd/float(kp+kd)

    for t in xrange(n_samples):

        t_last = max(tx_last, te_last)
        if xc[t]:
            dw[t] = x_last*e_last*r**(2*t_last-tx_last-te_last)*geosum(r**2, t_end=t-t_last+1, t_start=1)
            x_last = x_last * r**(t-tx_last) + xc[t]
            tx_last = t
        if ec[t]:
            dw[t] = x_last*e_last*r**(2*t_last-tx_last-te_last)*geosum(r**2, t_end=t-t_last+1, t_start=1)  # THing...
            e_last = e_last * r**(t-te_last) + ec[t]
            te_last = t
    return np.cumsum(dw)/kd**2
Project: pdnn    Author: petered
def pd_weight_grads_future(xc, ec, kp, kd):
    r = kd/float(kp+kd)
    kd=float(kd)
    scale_factor = 1./float(kp**2 + 2*kp*kd)
    n_samples = xc.shape[0]
    assert n_samples == ec.shape[0]
    n_in = xc.shape[1]
    n_out = ec.shape[1]
    dws = np.zeros((n_samples, n_in, n_out))
    xr = np.zeros(n_in)
    er = np.zeros(n_out)
    for t in xrange(n_samples):
        xr *= r
        er = er*r + ec[t]
        dws[t] = (xc[t][:, None]*er[None, :] + xr[:, None]*ec[t][None, :]) * scale_factor
        xr += xc[t]
        # er += ec[t]
    return np.cumsum(dws, axis=0)
Project: bob.bio.base    Author: bioidiap
def _train_pca(self, training_set):
    """Trains and returns a LinearMachine that is trained using PCA"""
    data = numpy.vstack(feature for feature in training_set)

    logger.info("  -> Training LinearMachine using PCA ")
    trainer = bob.learn.linear.PCATrainer()
    machine, eigen_values = trainer.train(data)

    if isinstance(self.subspace_dimension_pca, float):
      cummulated = numpy.cumsum(eigen_values) / numpy.sum(eigen_values)
      for index in range(len(cummulated)):
        if cummulated[index] > self.subspace_dimension_pca:
          self.subspace_dimension_pca = index
          break
      self.subspace_dimension_pca = index

    # limit number of pcs
    logger.info("  -> limiting PCA subspace to %d dimensions", self.subspace_dimension_pca)
    machine.resize(machine.shape[0], self.subspace_dimension_pca)
    return machine
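
The loop above keeps enough principal components to cover a target fraction of the total variance. A minimal NumPy-only sketch of the same idea (the eigenvalues and the 0.9 threshold are made up, and np.searchsorted replaces the explicit loop):

import numpy as np

eigen_values = np.array([4.0, 2.0, 1.0, 0.5, 0.5])           # hypothetical PCA eigenvalues
explained = np.cumsum(eigen_values) / np.sum(eigen_values)   # cumulative fraction of variance
n_components = int(np.searchsorted(explained, 0.9)) + 1      # smallest count reaching 90%
print(explained)        # [0.5  0.75  0.875  0.9375  1.0]
print(n_components)     # 4 -- four components cover 93.75% of the variance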
Project: bob.bio.base    Author: bioidiap
def train_projector(self, training_features, projector_file):
    """Generates the PCA covariance matrix and writes it into the given projector_file.

    **Parameters:**

    training_features : [1D :py:class:`numpy.ndarray`]
      A list of 1D training arrays (vectors) to train the PCA projection matrix with.

    projector_file : str
      A writable file, into which the PCA projection matrix (as a :py:class:`bob.learn.linear.Machine`) and the eigenvalues will be written.
    """
    # Assure that all data are 1D
    [self._check_feature(feature) for feature in training_features]

    # Initializes the data
    data = numpy.vstack(training_features)
    logger.info("  -> Training LinearMachine using PCA")
    t = bob.learn.linear.PCATrainer()
    self.machine, self.variances = t.train(data)
    # For re-shaping, we need to copy...
    self.variances = self.variances.copy()

    # compute variance percentage, if desired
    if isinstance(self.subspace_dim, float):
      cummulated = numpy.cumsum(self.variances) / numpy.sum(self.variances)
      for index in range(len(cummulated)):
        if cummulated[index] > self.subspace_dim:
          self.subspace_dim = index
          break
      self.subspace_dim = index
    logger.info("    ... Keeping %d PCA dimensions", self.subspace_dim)
    # re-shape machine
    self.machine.resize(self.machine.shape[0], self.subspace_dim)
    self.variances.resize(self.subspace_dim)

    f = bob.io.base.HDF5File(projector_file, "w")
    f.set("Eigenvalues", self.variances)
    f.create_group("Machine")
    f.cd("/Machine")
    self.machine.save(f)
Project: multimodal_varinf    Author: tmoer
def split(x, split_dim, split_sizes):
    n = len(list(x.get_shape()))
    dim_size = np.sum(split_sizes)
    assert int(x.get_shape()[split_dim]) == dim_size
    ids = np.cumsum([0] + split_sizes)
    ids[-1] = -1
    begin_ids = ids[:-1]

    ret = []
    for i in range(len(split_sizes)):
        cur_begin = np.zeros([n], dtype=np.int32)
        cur_begin[split_dim] = begin_ids[i]
        cur_end = np.zeros([n], dtype=np.int32) - 1
        cur_end[split_dim] = split_sizes[i]
        ret += [tf.slice(x, cur_begin, cur_end)]
    return ret
Project: CRN_ProbabilisticInversion    Author: elaloy
def GenCR(MCMCPar,pCR):

    if type(pCR) is np.ndarray:
        p=np.ndarray.tolist(pCR)[0]
    else:
        p=pCR
    CR=np.zeros((MCMCPar.seq * MCMCPar.steps),dtype=np.float)
    L =  np.random.multinomial(MCMCPar.seq * MCMCPar.steps, p, size=1)
    L2 = np.concatenate((np.zeros((1),dtype=np.int), np.cumsum(L)),axis=0)

    r = np.random.permutation(MCMCPar.seq * MCMCPar.steps)

    for zz in range(0,MCMCPar.nCR):

        i_start = L2[zz]
        i_end = L2[zz+1]
        idx = r[i_start:i_end]
        CR[idx] = np.float(zz+1)/MCMCPar.nCR

    CR = np.reshape(CR,(MCMCPar.seq,MCMCPar.steps))
    return CR, L
Project: CRN_ProbabilisticInversion    Author: elaloy
def DEStrategy(DEpairs,seq):

    # Determine which sequences to evolve with what DE strategy

    # Determine probability of selecting a given number of pairs
    p_pair = (1.0/DEpairs) * np.ones((1,DEpairs))
    p_pair = np.cumsum(p_pair)
    p_pair = np.concatenate((np.zeros((1)),p_pair),axis=0)

    DEversion=np.zeros((seq),dtype=np.int32)
    Z = np.random.rand(seq)
    # Select number of pairs
    for qq in range(0,seq):
        z = np.where(p_pair<=Z[qq])
        DEversion[qq] = z[0][-1]

    return DEversion
Project: prbg    Author: Lakate
def cumultativesumstest(binin):
    ''' The focus of this test is the maximal excursion (from zero) of the random walk defined by the cumulative sum of adjusted (-1, +1) digits in the sequence. The purpose of the test is to determine whether the cumulative sum of the partial sequences occurring in the tested sequence is too large or too small relative to the expected behavior of that cumulative sum for random sequences.  This cumulative sum may be considered as a random walk. For a random sequence, the random walk should be near zero. For non-random sequences, the excursions of this random walk away from zero will be too large.'''
    n = len(binin)
    ss = [int(el) for el in binin]
    sc = map(sumi, ss)
    cs = np.cumsum(sc)
    z = max(abs(cs))
    ra = 0
    start = int(np.floor(0.25 * np.floor(-n / z) + 1))
    stop = int(np.floor(0.25 * np.floor(n / z) - 1))
    pv1 = []
    for k in xrange(start, stop + 1):
        pv1.append(sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)) - sst.norm.cdf((4 * k - 1) * z / np.sqrt(n)))
    start = int(np.floor(0.25 * np.floor(-n / z - 3)))
    stop = int(np.floor(0.25 * np.floor(n / z) - 1))
    pv2 = []
    for k in xrange(start, stop + 1):
        pv2.append(sst.norm.cdf((4 * k + 3) * z / np.sqrt(n)) - sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)))
    pval = 1
    pval -= reduce(su, pv1)
    pval += reduce(su, pv2)

    return pval
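
The statistic at the heart of the test above is the maximal excursion of a cumulative-sum random walk; a hedged, NumPy-only sketch on a made-up bit sequence:

import numpy as np

bits = np.array([1, 0, 1, 1, 0, 1, 0, 0, 1, 1])   # hypothetical binary sequence
steps = 2 * bits - 1                              # adjust digits: {0, 1} -> {-1, +1}
walk = np.cumsum(steps)                           # partial sums form the random walk
z = np.max(np.abs(walk))                          # maximal excursion from zero
print(walk)   # [1 0 1 2 1 2 1 0 1 2]
print(z)      # 2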
Project: discretize    Author: simpeg
def exampleLrmGrid(nC, exType):
    assert type(nC) == list, "nC must be a list containing the number of nodes"
    assert len(nC) == 2 or len(nC) == 3, "nC must either two or three dimensions"
    exType = exType.lower()

    possibleTypes = ['rect', 'rotate']
    assert exType in possibleTypes, "Not a possible example type."

    if exType == 'rect':
        return list(ndgrid([np.cumsum(np.r_[0, np.ones(nx)/nx]) for nx in nC], vector=False))
    elif exType == 'rotate':
        if len(nC) == 2:
            X, Y = ndgrid([np.cumsum(np.r_[0, np.ones(nx)/nx]) for nx in nC], vector=False)
            amt = 0.5-np.sqrt((X - 0.5)**2 + (Y - 0.5)**2)
            amt[amt < 0] = 0
            return [X + (-(Y - 0.5))*amt, Y + (+(X - 0.5))*amt]
        elif len(nC) == 3:
            X, Y, Z = ndgrid([np.cumsum(np.r_[0, np.ones(nx)/nx]) for nx in nC], vector=False)
            amt = 0.5-np.sqrt((X - 0.5)**2 + (Y - 0.5)**2 + (Z - 0.5)**2)
            amt[amt < 0] = 0
            return [X + (-(Y - 0.5))*amt, Y + (-(Z - 0.5))*amt, Z + (-(X - 0.5))*amt]
Project: radar    Author: amoose136
def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                      np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)

            tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
            assert_array_equal(np.cumsum(a, axis=0), tgt)

            tgt = np.array(
                [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
            assert_array_equal(np.cumsum(a2, axis=0), tgt)

            tgt = np.array(
                [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
            assert_array_equal(np.cumsum(a2, axis=1), tgt)
Project: radar    Author: amoose136
def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):
        self.comments = comments
        # Delimiter is a character
        if isinstance(delimiter, unicode):
            delimiter = delimiter.encode('ascii')
        if (delimiter is None) or _is_bytes_like(delimiter):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                    self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
    #
Project: inferno    Author: inferno-pytorch
def map_index(self, index):
        # Get a list of lengths of all datasets. Say the answer is [4, 3, 3],
        # and we're looking for index = 5.
        len_list = list(map(len, self.datasets))
        # Cumulate to a numpy array. The answer is [4, 7, 10]
        cumulative_len_list = np.cumsum(len_list)
        # When the index is subtracted, we get [-1, 2, 5]. We're looking for the (index
        # of the) first cumulated len which is larger than the index (in this case,
        # 7 (index 1)).
        offset_cumulative_len_list = cumulative_len_list - index
        dataset_index = np.argmax(offset_cumulative_len_list > 0)
        # With the dataset index, we figure out the index in dataset
        if dataset_index == 0:
            # First dataset - index corresponds to index_in_dataset
            index_in_dataset = index
        else:
            # Get cumulated length up to the current dataset
            len_up_to_dataset = cumulative_len_list[dataset_index - 1]
            # Compute index_in_dataset as that what's left
            index_in_dataset = index - len_up_to_dataset
        return dataset_index, index_in_dataset
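
The same cumulative-length lookup can be written with np.searchsorted; a small sketch reproducing the worked example from the comments (lengths [4, 3, 3], global index 5):

import numpy as np

len_list = [4, 3, 3]                     # hypothetical per-dataset lengths
cumulative = np.cumsum(len_list)         # [4, 7, 10]
index = 5
dataset_index = int(np.searchsorted(cumulative, index, side='right'))   # -> 1
offset = 0 if dataset_index == 0 else cumulative[dataset_index - 1]
print(dataset_index, index - offset)     # 1 1  (second dataset, its second element)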
Project: cupy    Author: cupy
def cumsum(a, axis=None, dtype=None, out=None):
    """Returns the cumulative sum of an array along a given axis.

    Args:
        a (cupy.ndarray): Input array.
        axis (int): Axis along which the cumulative sum is taken. If it is not
        specified, the input is flattened.
        dtype: Data type specifier.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The result array.

    .. seealso:: :func:`numpy.cumsum`

    """
    return _cum_core(a, axis, dtype, out, _cumsum_kern, _cumsum_batch_kern)
Project: astk    Author: openalea-incubator
def __call__(self, time_sequence, weather_data):
        """ Compute thermal time accumulation over time_sequence

        :Parameters:
        ----------
        - `time_sequence` (panda dateTime index)
            A sequence of TimeStamps indicating the dates of all elementary time steps of the simulation
        - weather (alinea.astk.Weather instance)
            A Weather database

        """    
        try:
            Tair = weather_data.temperature_air[time_sequence]
        except:
            #strange extract needed on visualea 1.0 (to test again with ipython in visualea)
            T_data = weather_data[['temperature_air']]
            Tair = numpy.array([float(T_data.loc[d]) for d in time_sequence])
        Tcut = numpy.maximum(numpy.zeros_like(Tair), Tair - self.Tbase)
        days = [0] + [((t - time_sequence[0]).total_seconds()+ 3600) / 3600 / 24 for t in time_sequence]
        dt = numpy.diff(days).tolist()
        return numpy.cumsum(Tcut * dt)

# functional call for nodes
Project: chainerrl    Author: chainer
def accumulate_grad(self):
        if self.n_backward == 0:
            self.model.zerograds()
        # Compute losses
        losses = []
        for r_seq, log_prob_seq, ent_seq in zip(self.reward_sequences,
                                                self.log_prob_sequences,
                                                self.entropy_sequences):
            assert len(r_seq) - 1 == len(log_prob_seq) == len(ent_seq)
            # Convert rewards into returns (=sum of future rewards)
            R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]
            for R, log_prob, entropy in zip(R_seq, log_prob_seq, ent_seq):
                loss = -R * log_prob - self.beta * entropy
                losses.append(loss)
        total_loss = chainerrl.functions.sum_arrays(losses)
        # When self.batchsize is future.types.newint.newint, dividing a
        # Variable with it will raise an error, so it is manually converted to
        # float here.
        total_loss /= float(self.batchsize)
        total_loss.backward()
        self.reward_sequences = [[]]
        self.log_prob_sequences = [[]]
        self.entropy_sequences = [[]]
        self.n_backward += 1
Project: magenta    Author: tensorflow
def _to_notesequences(self, samples):
    output_sequences = []
    dim_ranges = np.cumsum(self._split_output_depths)
    for s in samples:
      mel_ns = self._melody_converter.to_notesequences(
          [s[:, :dim_ranges[0]]])[0]
      bass_ns = self._melody_converter.to_notesequences(
          [s[:, dim_ranges[0]:dim_ranges[1]]])[0]
      drums_ns = self._drums_converter.to_notesequences(
          [s[:, dim_ranges[1]:]])[0]

      for n in bass_ns.notes:
        n.instrument = 1
        n.program = ELECTRIC_BASS_PROGRAM
      for n in drums_ns.notes:
        n.instrument = 9

      ns = mel_ns
      ns.notes.extend(bass_ns.notes)
      ns.notes.extend(drums_ns.notes)
      ns.total_time = max(
          mel_ns.total_time, bass_ns.total_time, drums_ns.total_time)
      output_sequences.append(ns)
    return output_sequences
Project: magenta    Author: tensorflow
def sample_categorical(pmf):
  """Sample from a categorical distribution.

  Args:
    pmf: Probability mass function. Output of a softmax over categories.
      Array of shape [batch_size, number of categories]. Rows sum to 1.

  Returns:
    idxs: Array of size [batch_size, 1]. Integer of category sampled.
  """
  if pmf.ndim == 1:
    pmf = np.expand_dims(pmf, 0)
  batch_size = pmf.shape[0]
  cdf = np.cumsum(pmf, axis=1)
  rand_vals = np.random.rand(batch_size)
  idxs = np.zeros([batch_size, 1])
  for i in range(batch_size):
    idxs[i] = cdf[i].searchsorted(rand_vals[i])
  return idxs
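
A short usage sketch of the inverse-CDF sampling idea above (the pmf and the uniform draw are made up):

import numpy as np

pmf = np.array([[0.2, 0.5, 0.3]])        # one row per batch element; rows sum to 1
cdf = np.cumsum(pmf, axis=1)             # [[0.2 0.7 1.0]]
u = 0.65                                 # hypothetical uniform(0, 1) draw
category = cdf[0].searchsorted(u)        # 0.65 falls in (0.2, 0.7] -> category 1
print(category)                          # 1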
Project: HARK    Author: econ-ark
def simBirth(self,which_agents):
        '''
        Makes new Markov consumer by drawing initial normalized assets, permanent income levels, and
        discrete states. Calls IndShockConsumerType.simBirth, then draws from initial Markov distribution.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        '''
        IndShockConsumerType.simBirth(self,which_agents) # Get initial assets and permanent income
        N = np.sum(which_agents)
        base_draws = drawUniform(N,seed=self.RNG.randint(0,2**31-1))
        Cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
        self.MrkvNow[which_agents] = np.searchsorted(Cutoffs,base_draws).astype(int)
Project: paragraph2vec    Author: thunlp
def clip_spectrum(s, k, discard=0.001):
    """
    Given eigenvalues `s`, return how many factors should be kept to avoid
    storing spurious (tiny, numerically instable) values.

    This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).

    The returned value is clipped against `k` (= never return more than `k`).
    """
    # compute relative contribution of eigenvalues towards the energy spectrum
    rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
    # ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
    small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
    k = min(k, small) # clip against k
    logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)" %
                (k, 100 * rel_spectrum[k - 1]))
    return k
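
For reference, the residual-energy quantity used above on a toy spectrum (a sketch; the singular values are made up):

import numpy as np

s = np.array([5.0, 3.0, 1.0, 0.5, 0.5])                 # hypothetical eigenvalues
rel_spectrum = np.abs(1.0 - np.cumsum(s / np.sum(s)))   # energy left after keeping the first i+1 factors
print(rel_spectrum)                                     # [0.5  0.2  0.1  0.05  0.]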
Project: financial_life    Author: MartinPyka
def plot_stack_cum(semantic, *reports, color_theme = C_cold_colors, color_offset = 0):
    """ Creates a stacked plot with cumulated sums for a given semantic """ 
    X, Y, c = extract_data(semantic, *reports, color_theme = color_theme, color_offset = color_offset)
    Y = remove_nones(Y)
    # add zeros only at the beginning    
    for x in X:
        x.insert(0, x[0] - 1)

    for data in Y:
        for d in data:
            d.insert(0, 0.)

    # create the cumulated sum
    Y = [np.cumsum(np.array(yi), 1) if yi else [] for yi in Y]

    plot_stack_generic(X, Y, c, semantic, *reports, color_theme = color_theme, color_offset = color_offset)
    legend(loc = 'upper left', fancybox=True, framealpha=0.4, prop={'size':10})
Project: srep    Author: Answeror
def get_segment_vote_accuracy(segment_label, segment_predictions, window):
    def gen():
        count = {
            label: np.hstack([[0], np.cumsum(segment_predictions == label)])
            for label in set(segment_predictions)
        }
        tmp = window
        if tmp == -1:
            tmp = len(segment_predictions)
        tmp = min(tmp, len(segment_predictions))
        for begin in range(len(segment_predictions) - tmp + 1):
            yield segment_label == max(
                count,
                key=lambda label: count[label][begin + tmp] - count[label][begin]
            ), segment_label
    return list(gen())
Project: Dragonfly    Author: duaneloh
def _parse_headers(self):
        self.num_data_list = []
        self.ones_accum_list = []
        self.multi_accum_list = []
        self.num_pix = []

        for i, photons_file in enumerate(self.photons_list):
            with open(photons_file, 'rb') as f:
                num_data = np.fromfile(f, dtype='i4', count=1)[0]
                self.num_pix.append(np.fromfile(f, dtype='i4', count=1)[0])
                if self.num_pix[i] != len(self.geom_list[i].x):
                    sys.stderr.write('Warning: num_pix for %s is different (%d vs %d)\n' % (photons_file, self.num_pix[i], len(self.geom_list[i].x)))
                f.seek(1024, 0)
                ones = np.fromfile(f, dtype='i4', count=num_data)
                multi = np.fromfile(f, dtype='i4', count=num_data)
            self.num_data_list.append(num_data)
            self.ones_accum_list.append(np.cumsum(ones))
            self.multi_accum_list.append(np.cumsum(multi))

        self.num_data_list = np.cumsum(self.num_data_list)
        self.num_frames = self.num_data_list[-1]
Project: nanopores    Author: mitschabaude
def fit_koff(nmax=523, NN=4e8, **params):
    tbind = params.pop("tbind")
    params["kd"] = 1e9/tbind
    dx = params.pop("dx")
    rw = randomwalk.get_rw(NAME, params, setup=setup_rw, calc=True)
    rw.domains[1].dx = dx
    times = draw_empirically(rw, N=NN, nmax=nmax, success=False)
    bins = np.logspace(np.log10(min(times)), np.log10(max(times)), 35)
    #bins = np.logspace(-3., 2., 35)
    hist, _ = np.histogram(times, bins=bins)
    cfd = np.cumsum(hist)/float(np.sum(hist))
    t = 0.5*(bins[:-1] + bins[1:])
    tmean = times.mean()
    toff = NLS(t, cfd, t0=tmean)
    koff = 1./toff
    return dict(t=t, cfd=cfd, toff=toff, tmean=tmean, koff=koff)

##### run rw in collect mode and draw bindings from empirical distributions
Project: NANHM-for-GEC    Author: shinochin
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
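
The sections computed above are exactly the split points needed to undo the concatenation; a NumPy-only sketch of the round trip (no Chainer or CuPy required):

import numpy as np

batch = [np.arange(3), np.arange(4), np.arange(2)]     # hypothetical variable-length arrays
sections = np.cumsum([len(x) for x in batch[:-1]])     # [3, 7]
concat = np.concatenate(batch, axis=0)
pieces = np.split(concat, sections)
print([len(p) for p in pieces])                        # [3, 4, 2] -- original lengths recovered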
Project: NANHM-for-GEC    Author: shinochin
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
Project: NANHM-for-GEC    Author: shinochin
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
Project: NANHM-for-GEC    Author: shinochin
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
Project: NANHM-for-GEC    Author: shinochin
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
Project: NANHM-for-GEC    Author: shinochin
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
Project: polara    Author: Evfro
def csc_matvec(mat_csc, vec, dense_output=True, dtype=None):
    v_nnz = vec.indices
    v_val = vec.data

    m_val = mat_csc.data
    m_ind = mat_csc.indices
    m_ptr = mat_csc.indptr

    res_dtype = dtype or np.result_type(mat_csc.dtype, vec.dtype)
    if dense_output:
        res = np.zeros((mat_csc.shape[0],), dtype=res_dtype)
        matvec2dense(m_ptr, m_ind, m_val, v_nnz, v_val, res)
    else:
        sizes = m_ptr.take(v_nnz+1) - m_ptr.take(v_nnz)
        sizes = np.concatenate(([0], np.cumsum(sizes)))
        n = sizes[-1]
        data = np.empty((n,), dtype=res_dtype)
        indices = np.empty((n,), dtype=np.intp)
        indptr = np.array([0, n], dtype=np.intp)
        matvec2sparse(m_ptr, m_ind, m_val, v_nnz, v_val, sizes, indices, data)
        res = sp.sparse.csr_matrix((data, indices, indptr), shape=(1, mat_csc.shape[0]), dtype=res_dtype)
        res.sum_duplicates() # expensive operation
    return res
Project: YellowFin    Author: JianGoForIt
def plot_loss(loss_list, log_dir, iter_id):
  def running_mean(x, N):
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / N
  plt.figure()
  plt.semilogy(loss_list, '.', alpha=0.2, label="Loss")
  plt.semilogy(running_mean(loss_list,100), label="Average Loss")
  plt.xlabel('Iterations')
  plt.ylabel('Loss')
  plt.legend()
  plt.grid()
  ax = plt.subplot(111)
  ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
        ncol=3, fancybox=True, shadow=True)
  plt.savefig(log_dir + "/fig_loss_iter_" + str(iter_id) + ".pdf")
  print("figure plotted")
  plt.close()
Project: motif    Author: rabitt
def _moving_average(self, a):
        """Compute the moving average of a signal.

        Parameters
        ----------
        a : np.array
            Signal

        Returns
        -------
        a_avg : np.array
            Moving average of signal.

        """
        n = self.avg_filt_len
        ret = np.cumsum(a, dtype=float)
        ret[n:] = ret[n:] - ret[:-n]
        return ret[n - 1:] / n
Project: pylspm    Author: lseman
def PCAdo(block, name):
    cor_ = np.corrcoef(block.T)
    eig_vals, eig_vecs = np.linalg.eig(cor_)
    tot = sum(eig_vals)
    var_exp = [(i / tot) * 100 for i in sorted(eig_vals, reverse=True)]
    cum_var_exp = np.cumsum(var_exp)
    loadings = (eig_vecs * np.sqrt(eig_vals))

    eig_vals = np.sort(eig_vals)[::-1]
    print('Eigenvalues')
    print(eig_vals)
    print('Variance Explained')
    print(var_exp)
    print('Total Variance Explained')
    print(cum_var_exp)
    print('Loadings')
    print(abs(loadings[:, 0]))

    PAcorrect = PA(block.shape[0], block.shape[1])

    print('Parallel Analisys')
    pa = (eig_vals - (PAcorrect - 1))
    print(pa)

    print('Correlation Matrix')
    print(pd.DataFrame.corr(block))

    plt.plot(range(1,len(pa)+1), pa, '-o')
    plt.grid(True)
    plt.xlabel('Fatores')
    plt.ylabel('Componentes')

    plt.savefig('imgs/PCA' + name, bbox_inches='tight')
    plt.clf()
    plt.cla()
#    plt.show()
Project: cellranger    Author: 10XGenomics
def init_ecdf(self):
        if self.count == 0:
            raise Exception('Need to add items before you can call init_ecdf()')
        self.init_prob()
        self._ecdf = np.cumsum( self._prob )
        return

    # Precondition: must call init_prob() after last add() operation
Project: rank-ordered-autoencoder    Author: paulbertens
def reconstruct(self):
        # reconstruct progressively
        self.reconstruction[:, self.output_ranks] = np.cumsum(
            self.weights[:, self.output_ranks] * self.output[:, self.output_ranks], axis=1)
        self.activation(self.reconstruction, out=self.reconstruction)
Project: rank-ordered-autoencoder    Author: paulbertens
def get_sparsity_error_term(self):
        # the total error function being minimized
        return np.sum(np.cumsum(self.get_reconstruction_error_vector()[::-1]))
Project: bnn-analysis    Author: myshkov
def moving_average(a, n=3):
    ret = np.cumsum(a)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n
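
A quick check of the cumsum-based moving average above against a direct convolution (a sketch; the two agree up to floating-point error):

import numpy as np

x = np.arange(10, dtype=float)
n = 3
ret = np.cumsum(x)
ret[n:] = ret[n:] - ret[:-n]                            # windowed sums via differences of the cumsum
print(ret[n - 1:] / n)                                  # [1. 2. 3. 4. 5. 6. 7. 8.]
print(np.convolve(x, np.ones(n) / n, mode='valid'))     # same values computed directly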
Project: ProtScan    Author: gianlucacorrado
def fit_optimal(blocks, keys):
    """Fit the cumulative distribution, by optimization."""
    values = list()
    for k in keys:
        values.extend([max_val for (_, _, max_val) in blocks[k]])
    hist, bin_edges = np.histogram(values, bins=100, normed=True)
    bin_centers = np.array(
        [(bin_edges[i - 1] + bin_edges[i]) / 2
         for i in range(1, len(bin_edges))])
    cumulative = np.cumsum(hist) / np.sum(hist)
    popt, _ = optimize.curve_fit(func, bin_centers, cumulative)
    return popt
Project: pdnn    Author: petered
def pd_weight_grads(xc, ec, kp, kd):
    """
    Efficiently compute the weights over time for a system receiving sparse inputs xc and ec.

    :param xc: An (n_samples, n_in) array of input spikes
    :param ec:  An (n_samples, n_out) array of error spikes
    :param kp: A scalar kp value
    :param kd: A scalar kd value
    :return: An (n_samples, n_in, n_out) array of weight updates
    """
    r = kd/float(kp+kd)
    n_samples = xc.shape[0]
    assert n_samples == ec.shape[0]
    n_in = xc.shape[1]
    n_out = ec.shape[1]
    dws = np.zeros((n_samples, n_in, n_out))
    tx_last = np.zeros(n_in)
    te_last = np.zeros(n_out)
    x_last = np.zeros(n_in)
    e_last = np.zeros(n_out)
    for t in xrange(n_samples):
        x_spikes = xc[t] != 0

        t_last = np.maximum(tx_last[x_spikes, None], te_last)
        dws[t, x_spikes, :] = x_last[x_spikes, None] * e_last * r**(2*t_last-tx_last[x_spikes, None]-te_last)*geosum(r**2, t_end=t-t_last, t_start=1)  # Not 100% on this...
        x_last[x_spikes] = x_last[x_spikes]*r**(t-tx_last[x_spikes]) + xc[t][x_spikes] / float(kd)
        tx_last[x_spikes] = t
        e_spikes = ec[t] != 0
        if np.any(e_spikes):
            t_last = np.maximum(tx_last[:, None], te_last[e_spikes])
            dws[t, :, e_spikes] += (x_last[:, None] * e_last[e_spikes] * r**(2*t_last-tx_last[:, None]-te_last[e_spikes])*geosum(r**2, t_end=t-t_last, t_start=1)).T  # T makes no sense here but
            e_last[e_spikes] = e_last[e_spikes]*r**(t-te_last[e_spikes]) + ec[t][e_spikes] / float(kd)
            te_last[e_spikes] = t
    return np.cumsum(dws, axis=0)
Project: pdnn    Author: petered
def pd_weight_grads_mult(xc, ec, kp, kd):
    """
    Efficiently compute the weights over time for a system receiving sparse inputs xc and ec.

    We use a slightly different approach this time - instead of keeping the last values and spike
    times for each neuron, we keep running filtered values that are updated at every step.

    :param xc: An (n_samples, n_in) array of input spikes
    :param ec:  An (n_samples, n_in) array of error_spikes
    :param kp: A scalar kp value
    :param kd: A scalar kd value
    :return: An (n_samples, n_in, n_out) array of weight updates
    """
    # TODO: Make this work and pass The Test
    r = kd/float(kp+kd)
    kd=float(kd)
    n_samples = xc.shape[0]
    assert n_samples == ec.shape[0]
    n_in = xc.shape[1]
    n_out = ec.shape[1]
    dws = np.zeros((n_samples, n_in, n_out))
    xr = np.zeros(n_in)
    xi = np.zeros(n_in)
    er = np.zeros(n_out)
    ei = np.zeros(n_out)
    for t in xrange(n_samples):
        xr = r*xr + xc[t]/kd**2
        er = r*er + ec[t]/kd**2
        xi = r*xi*(1-(ec[t]!=0)) + xr
        ei = r*ei*((1-xc[t]!=0)) + er
        # dws[t] = xc[t, :, None]*ei[None, :] + xi[:, None]*ec[t, None, :]
        dws[t] = xc[t, :, None]*ei[None, :] + xi[:, None]*ec[t, None, :]
    return np.cumsum(dws, axis=0)*kd
Project: pdnn    Author: petered
def sequential_quantize(v, n_steps = None, method='herd', rng = None):
    """
    :param v: A (..., n_samples, n_units, ) array
    :param n_steps: The number of steps to spike for
    :return: An (..., n_steps, n_units) array of quantized values
    """
    rng = get_rng(rng)
    assert v.ndim>=2
    if n_steps is None:
        n_steps = v.shape[-2]
    else:
        assert n_steps == v.shape[-2]

    if method=='herd':
        result = fixed_diff(np.round(np.cumsum(v, axis=-2)), axis=-2)
    elif method=='herd2':
        result = fixed_diff(fixed_diff(np.round(np.cumsum(np.cumsum(v, axis=-2), axis=-2)), axis=-2), axis=-2)
    elif method=='round':
        result = np.round(v)
    elif method == 'slippery.9':
        result = slippery_round(v, slip=0.9)
    elif method == 'slippery.5':
        result = slippery_round(v, slip=0.5)
    elif method == 'randn':
        result = v + rng.randn(*v.shape)
    elif method=='uniform':
        result = v + rng.uniform(-.5, .5, size=v.shape)
    elif method=='surrogate-noise':
        result = v + (12**.5)*((v%1)-(v%1)**2)*rng.uniform(low=-.5, high=.5, size=v.shape)
    elif method == 'surrogate-sqrt':
        result = v + np.sqrt((12**.5)*((v%1)-(v%1)**2)*rng.uniform(low=-.5, high=.5, size=v.shape))
    elif method is None:
        result = v
    else:
        raise NotImplementedError("Don't have quantization method '%s' implemented" % (method, ))
    return result
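
The 'herd' branch above rounds the running sum and then takes differences; a minimal one-dimensional sketch of that idea, with the project's fixed_diff replaced (as an assumption) by np.diff(..., prepend=0):

import numpy as np

v = np.array([0.3, 0.3, 0.3, 0.3, 0.3])                # hypothetical analog values per time step
herded = np.diff(np.round(np.cumsum(v)), prepend=0.0)  # quantized steps whose running sum tracks cumsum(v)
print(herded)                                          # [0. 1. 0. 0. 1.]
print(np.cumsum(herded), np.cumsum(v))                 # [0. 1. 1. 1. 2.] vs [0.3 0.6 0.9 1.2 1.5]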
Project: psola    Author: jcreinhold
def pitch_strength_all_candidates(f_erbs, L, pc):
    """
    Calculates the pitch ``strength'' of all candidate
    pitches

    Args:
        f_erbs (array): frequencies in ERBs
        L     (matrix): loudness matrix
        pc     (array): pitch candidates array

    Returns:
        S   (array): strength of pitches corresponding to pc's
    """
    # create pitch strength matrix
    S = np.zeros((pc.size, L.shape[1]))

    # define integration regions
    k = np.zeros(pc.size+1)

    for j in range(k.size-1):
        idx = int(k[j])
        f = f_erbs[idx:]
        val = find(f > pc[j] / 4)[0]
        k[j+1] = k[j] + val

    k = k[1:]  # TODO: fix this sloppiness

    # create loudness normalization matrix
    N = np.sqrt(np.flipud(np.cumsum(np.flipud(L * L), 0)))
    for j in range(pc.size):
        # normalize loudness
        n = N[int(k[j]), :]
        n[n == 0] = -np.inf  # to make zero-loudness equal zero after normalization
        nL = L[int(k[j]):] / np.tile(n, (int(L.shape[0] - k[j]), 1))

        # compute pitch strength
        S[j] = pitch_strength_one_candidate(f_erbs[int(k[j]):], nL, pc[j])

    return S
Project: bob.bio.base    Author: bioidiap
def _train_pca(self, training_set):
    """Trains and returns a LinearMachine that is trained using PCA"""
    data_list = (feature for client in training_set for feature in client)
    data = numpy.vstack(data_list)

    logger.info("  -> Training Linear Machine using PCA")
    t = bob.learn.linear.PCATrainer()
    machine, eigen_values = t.train(data)

    if isinstance(self.pca_subspace, float):
      cummulated = numpy.cumsum(eigen_values) / numpy.sum(eigen_values)
      for index in range(len(cummulated)):
        if cummulated[index] > self.pca_subspace:
          self.pca_subspace = index
          break
      self.pca_subspace = index

    if self.lda_subspace is not None and self.pca_subspace <= self.lda_subspace:
      logger.warn("  ... Extending the PCA subspace dimension from %d to %d", self.pca_subspace, self.lda_subspace + 1)
      self.pca_subspace = self.lda_subspace + 1
    else:
      logger.info("  ... Limiting PCA subspace to %d dimensions", self.pca_subspace)

    # limit number of pcs
    machine.resize(machine.shape[0], self.pca_subspace)
    return machine
Project: sand-glyphs    Author: inconvergent
def _interpolate_write_with_cursive(glyphs, inum, theta, noise, offset_size):
  stack = row_stack(glyphs)
  ig = _rnd_interpolate(stack, len(glyphs)*inum, ordered=True)
  gamma = theta + cumsum((1.0-2.0*random(len(ig)))*noise)
  dd = column_stack((cos(gamma), sin(gamma)))*offset_size
  a = ig + dd
  b = ig + dd[:,::-1]*array((1,-1))

  return a, b
Project: higlass-server    Author: hms-dbmi
def get_frag_by_loc(
    cooler_file,
    loci,
    dim,
    zoomout_level=0,
    balanced=True,
    padding=None,
    percentile=100.0,
    ignore_diags=0,
    no_normalize=False
):
    with h5py.File(cooler_file, 'r') as f:
        c = get_cooler(f, zoomout_level)

        # Calculate the offsets once
        resolution = c.info['bin-size']
        chromsizes = np.ceil(c.chromsizes / resolution).astype(int)
        offsets = np.cumsum(chromsizes) - chromsizes

        fragments = collect_frags(
            c,
            loci,
            dim,
            resolution,
            offsets,
            padding=padding,
            balanced=balanced,
            percentile=percentile,
            ignore_diags=ignore_diags,
            no_normalize=no_normalize
        )

    return fragments
Project: higlass-server    Author: hms-dbmi
def abs2genomic(chromsizes, start_pos, end_pos):
    abs_chrom_offsets = np.r_[0, np.cumsum(chromsizes.values)]
    cid_lo, cid_hi = np.searchsorted(abs_chrom_offsets,
                                     [start_pos, end_pos],
                                     side='right') - 1
    rel_pos_lo = start_pos - abs_chrom_offsets[cid_lo]
    rel_pos_hi = end_pos - abs_chrom_offsets[cid_hi]
    start = rel_pos_lo
    for cid in range(cid_lo, cid_hi):
        yield cid, start, chromsizes[cid]
        start = 0
    yield cid_hi, start, rel_pos_hi
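
A brief hedged illustration of the offsets used above, with made-up chromosome sizes (chromsizes as a pandas Series, as in the calling code):

import numpy as np
import pandas as pd

chromsizes = pd.Series([100, 50, 80], index=['chr1', 'chr2', 'chr3'])
abs_chrom_offsets = np.r_[0, np.cumsum(chromsizes.values)]            # [0 100 150 230]
pos = 120                                                             # hypothetical absolute position
cid = np.searchsorted(abs_chrom_offsets, pos, side='right') - 1       # -> 1 (chr2)
print(cid, pos - abs_chrom_offsets[cid])                              # 1 20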
Project: slda    Author: Savvysherpa
def _create_edges(self, y, order='tail'):
        y.sort(order=order)
        _docs, _counts = np.unique(y[order], return_counts=True)
        counts = np.zeros(self.n_docs)
        counts[_docs] = _counts
        docs = np.ascontiguousarray(
            np.concatenate(([0], np.cumsum(counts))), dtype=np.intc)
        edges = np.ascontiguousarray(y['index'].flatten(), dtype=np.intc)
        return docs, edges
Project: slda    Author: Savvysherpa
def _create_edges(self, y, order='tail'):
        y.sort(order=order)
        _docs, _counts = np.unique(y[order], return_counts=True)
        counts = np.zeros(self.n_docs)
        counts[_docs] = _counts
        docs = np.ascontiguousarray(
            np.concatenate(([0], np.cumsum(counts))), dtype=np.intc)
        edges = np.ascontiguousarray(y['index'].flatten(), dtype=np.intc)
        return docs, edges
Project: mpnum    Author: dseuss
def _compress_svd_l(self, rank, relerr, svdfunc):
        """Compresses the MPA in place from right to left using SVD;
        yields a right-canonical state

        See :func:`~compress` for more details and arguments.

        """
        assert rank > 0, "Cannot compress to rank={}".format(rank)
        assert (relerr is None) or ((0. <= relerr) and (relerr <= 1.)), \
            "relerr={} not allowed".format(relerr)

        for site in range(len(self) - 1, 0, -1):
            ltens = self._lt[site]
            matshape = (ltens.shape[0], -1)
            if relerr is None:
                u, sv, v = svdfunc(ltens.reshape(matshape), rank)
                rank_t = len(sv)
            else:
                u, sv, v = svd(ltens.reshape(matshape))
                svsum = np.cumsum(sv) / np.sum(sv)
                rank_relerr = np.searchsorted(svsum, 1 - relerr) + 1
                rank_t = min(ltens.shape[0], v.shape[0], rank, rank_relerr)

            yield sv, rank_t

            newtens = (matdot(self._lt[site - 1], u[:, :rank_t] * sv[None, :rank_t]),
                       v[:rank_t, :].reshape((rank_t, ) + ltens.shape[1:]))
            self._lt.update(slice(site - 1, site + 1), newtens,
                            canonicalization=(None, 'right'))

        yield np.sum(np.abs(self._lt[0])**2)