Python numpy module: nonzero() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.nonzero().
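
Before the examples, a quick reminder of the core behavior (a minimal sketch, assuming numpy is imported as np): np.nonzero returns a tuple of index arrays, one per dimension, which can be used directly for fancy indexing.

import numpy as np

a = np.array([[0, 2, 0],
              [1, 0, 3]])
rows, cols = np.nonzero(a)  # one index array per dimension
print(rows, cols)           # [0 1 1] [1 0 2]
print(a[rows, cols])        # [2 1 3]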

Project: brats17    Author: xf4j    | Project source | File source
def generate_patch_probs(path, patch_locations, patch_size, im_size):
    x, y, z = patch_locations
    seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
    p = []
    for i in range(len(x)):
        for j in range(len(y)):
            for k in range(len(z)):
                patch = seg[int(x[i] - patch_size / 2) : int(x[i] + patch_size / 2),
                            int(y[j] - patch_size / 2) : int(y[j] + patch_size / 2),
                            int(z[k] - patch_size / 2) : int(z[k] + patch_size / 2)]
                patch = (patch > 0).astype(np.float32)
                percent = np.sum(patch) / (patch_size * patch_size * patch_size)
                p.append((1 - np.abs(percent - 0.5)) * percent)
    p = np.asarray(p, dtype=np.float32)
    p[p == 0] = np.amin(p[np.nonzero(p)])
    p = p / np.sum(p)
    return p
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def callback_rect(self, eclick, erelease):
        xmin, xmax, ymin, ymax = eclick.xdata, erelease.xdata, eclick.ydata, erelease.ydata
        if xmin > xmax:
            xmin, xmax = xmax, xmin
        if ymin > ymax:
            ymin, ymax = ymax, ymin
        x, y = self.x_position, self.y_position
        in_selection = ((x >= xmin) & (x <= xmax) &
                        (y >= ymin) & (y <= ymax))
        indices = np.nonzero(in_selection)[0]
        add_or_remove = None
        if erelease.key == 'shift':
            add_or_remove = 'add'
        elif erelease.key == 'control':
            add_or_remove = 'remove'
        self.update_inspect(indices, add_or_remove)
Project: cellranger    Author: 10XGenomics    | Project source | File source
def get_bc_counts(genomes, genes, molecule_counter):
    genome_ids = molecule_counter.get_column('genome')
    genome_index = cr_reference.get_genome_index(genomes)
    conf_mapped_reads = molecule_counter.get_column('reads')
    barcodes = molecule_counter.get_column('barcode')

    bc_counts = {}
    for genome in genomes:
        genome_id = cr_reference.get_genome_id(genome, genome_index)
        genome_indices = genome_ids == genome_id
        if genome_indices.sum() == 0:
            # edge case - there's no data for this genome (e.g. empty sample, false barnyard sample, or nothing confidently mapped)
            continue
        bcs_for_genome = barcodes[genome_indices]
        # only count UMIs with at least one conf mapped read
        umi_conf_mapped_to_genome = conf_mapped_reads[genome_indices] > 0
        bc_breaks = bcs_for_genome[1:] - bcs_for_genome[:-1]
        bc_breaks = np.concatenate(([1], bc_breaks)) # first row is always a break
        bc_break_indices = np.nonzero(bc_breaks)[0]
        unique_bcs = bcs_for_genome[bc_break_indices]
        umis_per_bc = np.add.reduceat(umi_conf_mapped_to_genome, bc_break_indices)
        cmb_reads_per_bc = np.add.reduceat(conf_mapped_reads[genome_indices], bc_break_indices)
        bc_counts[genome] = (unique_bcs, umis_per_bc, cmb_reads_per_bc)

    return bc_counts
Project: psola    Author: jcreinhold    | Project source | File source
def primes_2_to_n(n):
    """
    Efficient algorithm to find and list primes from
    2 to `n'.

    Args:
        n (int): highest number from which to search for primes

    Returns:
        np array of all primes from 2 to n

    References:
        Robert William Hanks,
        https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/
    """
    sieve = np.ones(int(n / 3 + (n % 6 == 2)), dtype=bool)
    for i in range(1, int((n ** 0.5) / 3 + 1)):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[int(k * k / 3)::2 * k] = False
            sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
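
A quick sanity check of the sieve (a hypothetical call, assuming numpy is imported as np):

print(primes_2_to_n(30))  # -> [ 2  3  5  7 11 13 17 19 23 29]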
Project: code-uai16    Author: thanhan    | Project source | File source
def top_uncer_items(adata, pp, n, flag = None):
    """
    Return indices of the top n most uncertain items that are not flagged
    """
    uncertain = np.abs(pp[:,0] - 0.5)

    if flag is not None:
        addition = np.asarray(flag, dtype=int) * 10  # flagged items are not considered; raise their value so they rank last
        uncertain = uncertain + addition

    if len(uncertain) <= n:
        return np.nonzero(uncertain <= 10000000)[0]

    sorted_uncertain = np.sort(uncertain)

    thresh = sorted_uncertain[n]
    return np.nonzero(uncertain <= thresh)[0]
Project: code-uai16    Author: thanhan    | Project source | File source
def items_for_expert(adata, pp, n, flag):
    """
    Take n items for the expert to consider
    """
    combined_prob = 0.8*np.asarray(adata.taken_crowd_prob) + 0.2*pp[:,1]
    uncertain = np.abs(combined_prob - 0.5)

    if flag is not None:
        addition = np.asarray(flag, dtype=int) * 10  # flagged items are not considered; raise their value so they rank last
        uncertain = uncertain + addition

    if len(uncertain) <= n:
        return np.nonzero(uncertain <= 10000000)[0]

    sorted_uncertain = np.sort(uncertain)

    thresh = sorted_uncertain[n]
    return np.nonzero(uncertain <= thresh)[0]
Project: deeppavlov    Author: deepmipt    | Project source | File source
def balance_dataset(dataset_0, labels_0, dataset_1, labels_1, ratio=1):
    """Balance the dataset_0 with samples from dataset_1 up to given ratio.

    Args:
        dataset_0: array of text samples
        labels_0: array of labels for dataset_0
        dataset_1: array of text samples
        labels_1: array of labels for dataset_1
        ratio: ratio of samples of class 1 to samples of class 0 (default 1.0)

    Returns:
        balanced array of text samples, corresponding array of labels
    """
    initial_train_size = dataset_0.shape[0]
    insult_inds = np.nonzero(labels_1)[0]
    num_insults_0 = len(np.nonzero(labels_0)[0])
    num_insults_1 = len(np.nonzero(labels_1)[0])
    insult_inds_to_add = insult_inds[np.random.randint(low=0, high=num_insults_1,
                                                       size=(ratio * (initial_train_size - num_insults_0) - num_insults_0))]
    result = dataset_0.append(dataset_1.iloc[insult_inds_to_add])
    result_labels = labels_0.append(labels_1.iloc[insult_inds_to_add])
    return result, result_labels
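
A usage sketch under assumptions: the .append/.iloc calls imply pandas objects (pandas < 2.0, where Series.append still exists); the data below is made up for illustration.

import pandas as pd
texts_0 = pd.Series(['a', 'b', 'c', 'd']); labels_0 = pd.Series([0, 0, 0, 1])
texts_1 = pd.Series(['e', 'f']);           labels_1 = pd.Series([1, 1])
balanced, balanced_labels = balance_dataset(texts_0, labels_0, texts_1, labels_1)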
Project: palladio    Author: slipguru    | Project source | File source
def retrieve_features(best_estimator):
    """Retrieve selected features from any estimator.

    In case it has the 'get_support' method, use it.
    Else, if it has a 'coef_' attribute, assume it's a linear model and the
    features correspond to the indices of the coefficients != 0
    """
    if hasattr(best_estimator, 'get_support'):
        return np.nonzero(best_estimator.get_support())[0]
    elif hasattr(best_estimator, 'coef_'):
        # print best_estimator.coef_
        if best_estimator.coef_.ndim > 1 and 1 not in best_estimator.coef_.shape:
            sel_feats = []
            for dim in range(best_estimator.coef_.ndim):
                sel_feats += np.nonzero(
                    best_estimator.coef_[dim])[0].ravel().tolist()
            return np.unique(sel_feats)
        return np.nonzero(best_estimator.coef_.flatten())[0]
    else:
        # Raise an error
        raise AttributeError('The best_estimator object does not have '
                             'neither the `coef_` attribute nor the '
                             '`get_support` method')
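
A quick check with a linear model (a sketch, assuming scikit-learn is available; Lasso exposes coef_, so the nonzero-coefficient branch is taken):

import numpy as np
from sklearn.linear_model import Lasso

X = np.random.RandomState(0).randn(50, 10)
y = X[:, 2] - 3 * X[:, 7]
est = Lasso(alpha=0.1).fit(X, y)
print(retrieve_features(est))  # expected to include features 2 and 7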
Project: geco_data    Author: stefco    | Project source | File source
def _get_missing_m_trend(self, pad='DEFAULT_PAD', **kwargs):
        """Get a single second of missing data."""
        logging.debug('Fetching missing m-trend: {}'.format(self))
        missing_buf = self.fetch() # explicitly fetch from NDS2
        trend = self.channel.split('.')[1].split(',')[0]
        # make m-trend value for this minute based on trend extension
        if len(np.nonzero(missing_buf == -1)[0]) != 0:
            # this won't actually check for anything at the moment because
            # gwpy.timeseries.TimeSeries.fetch() does not have a padding option
            # yet
            logging.warning('Still missing data in {}'.format(self))
        elif trend == 'mean':
            buf_trend = missing_buf.mean()
        elif trend == 'min':
            buf_trend = missing_buf.min()
        elif trend == 'max':
            buf_trend = missing_buf.max()
        elif trend == 'rms':
            buf_trend = missing_buf.rms(60)[0]
        elif trend == 'n':
            buf_trend = missing_buf.sum()
        else:
            raise ValueError('Unrecognized trend type: {}'.format(trend))
        return buf_trend
Project: geco_data    Author: stefco    | Project source | File source
def plot_timeseries(self, ax, **kwargs):
        """Scale up by 10^9 since plots are in ns, not seconds.
        Remove any indices considered bad in ``plot_properties``"""
        # define the variables for our plots
        y = np.delete(self.plot_vars.means - self.trend,
                      self.bad_indices.means) / SEC_PER['ns']
        t = np.delete(self.t_axis, self.bad_indices.means)
        yerr = np.delete(self.plot_vars.stds,
                         self.bad_indices.means) / SEC_PER['ns']
        mint = np.delete(self.t_axis, self.bad_indices.mins)
        miny = np.delete(self.plot_vars.mins - self.trend,
                         self.bad_indices.mins) / SEC_PER['ns']
        maxt = np.delete(self.t_axis, self.bad_indices.maxs)
        maxy = np.delete(self.plot_vars.maxs - self.trend,
                         self.bad_indices.maxs) / SEC_PER['ns']
        # plot everything, but only if the plotted data has nonzero length
        # in order to avoid an annoying matplotlib bug when adding legends.
        if len(t) != 0:
            ax.errorbar(t, y, marker="o", color="green", linestyle='none',
                        yerr=yerr, label="Means +/- Std. Dev.")
        if len(mint) != 0:
            ax.scatter(mint, miny, marker="^", color="blue", label="Minima")
        if len(maxt) != 0:
            ax.scatter(maxt, maxy, marker="v", color="red", label="Maxima")
Project: geco_data    Author: stefco    | Project source | File source
def plot_timeseries(self, ax, **kwargs):
        """Scale up by 10^9 since plots are in ns, not seconds.
        Remove any indices considered bad in ``plot_properties``"""
        # define the variables for our plots
        t = np.delete(self.t_axis, self.bad_indices.means)
        y = np.delete(self.plot_vars.means - self.trend,
                      self.bad_indices.means) / SEC_PER['ns']
        yerr = np.delete(self.plot_vars.stds,
                         self.bad_indices.means) / SEC_PER['ns']
        mint = np.delete(self.t_axis, self.bad_indices.absmins)
        miny = np.delete(self.plot_vars.absmins - self.trend,
                         self.bad_indices.absmins) / SEC_PER['ns']
        maxt = np.delete(self.t_axis, self.bad_indices.absmaxs)
        maxy = np.delete(self.plot_vars.absmaxs - self.trend,
                         self.bad_indices.absmaxs) / SEC_PER['ns']
        # plot everything, but only if the plotted data has nonzero length
        # in order to avoid an annoying matplotlib bug when adding legends.
        if len(t) != 0:
            ax.errorbar(t, y, marker="o", color="green", linestyle='none',
                        yerr=yerr, label="Means +/- Std. Dev.")
        if len(mint) != 0:
            ax.scatter(mint,miny,marker="^", color="blue", label="Abs. Minima")
        if len(maxt) != 0:
            ax.scatter(maxt,maxy,marker="v", color="red", label="Abs. Maxima")
Project: IntelAct-Vizdoom    Author: chendagui16    | Project source | File source
def __act_manual(self, state_meas):
        if len(self.__measure_for_manual):
            # [AMMO2, AMMO3, AMMO4, AMMO5, AMMO6, AMMO7, WEAPON2,
            # WEAPON3 WEAPON4 WEAPON5 WEAPON6 WEAPON7 SELECTED_WEAPON]
            assert len(self.__measure_for_manual) == 13
            # [SELECT_WEAPON2 SELECT_WEAPON3 SELECT_WEAPON4 SELECT_WEAPON5 SELECT_WEAPON6 SELECT_WEAPON7]
            curr_action = np.zeros((state_meas.shape[0], self.__num_manual_controls), dtype=int)
            for ns in range(state_meas.shape[0]):
                curr_ammo = state_meas[ns, self.__measure_for_manual[:6]]
                curr_weapons = state_meas[ns, self.__measure_for_manual[6:12]]
                if self.verbose:
                    print('current ammo:', curr_ammo)
                    print('current weapons:', curr_weapons)
                available_weapons = np.logical_and(curr_ammo >= np.array([1, 2, 1, 1, 1, 40]), curr_weapons)
                if any(available_weapons):
                    best_weapon = np.nonzero(available_weapons)[0][-1]
                    if not state_meas[ns, self.__measure_for_manual[12]] == best_weapon + 2:
                        curr_action[ns, best_weapon] = 1
            return curr_action
        else:
            return []
Project: radar    Author: amoose136    | Project source | File source
def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
        s = np.ones(10, dtype=float)
        x = np.array((15,), dtype=float)

        def ia(x, s, v):
            x[(s > 0)] = v

        # After removing deprecation, the following are ValueErrors.
        # This might seem odd as compared to the value error below. This
        # is due to the fact that the new code always uses "nonzero" logic
        # and the boolean special case is not taken.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
            self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
            self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
        # Old special case (different code path):
        self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
        self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
Project: radar    Author: amoose136    | Project source | File source
def test_nonzero_twodim(self):
        x = np.array([[0, 1, 0], [2, 0, 3]])
        assert_equal(np.count_nonzero(x), 3)
        assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))

        x = np.eye(3)
        assert_equal(np.count_nonzero(x), 3)
        assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))

        x = np.array([[(0, 1), (0, 0), (1, 11)],
                   [(1, 1), (1, 0), (0, 0)],
                   [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
        assert_equal(np.count_nonzero(x['a']), 4)
        assert_equal(np.count_nonzero(x['b']), 5)
        assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
        assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))

        assert_(not x['a'].T.flags.aligned)
        assert_equal(np.count_nonzero(x['a'].T), 4)
        assert_equal(np.count_nonzero(x['b'].T), 5)
        assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
        assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
Project: radar    Author: amoose136    | Project source | File source
def test_half_ordering(self):
        """Make sure comparisons are working right"""

        # All non-NaN float16 values in reverse order
        a = self.nonan_f16[::-1].copy()

        # 32-bit float copy
        b = np.array(a, dtype=float32)

        # Should sort the same
        a.sort()
        b.sort()
        assert_equal(a, b)

        # Comparisons should work
        assert_((a[:-1] <= a[1:]).all())
        assert_(not (a[:-1] > a[1:]).any())
        assert_((a[1:] >= a[:-1]).all())
        assert_(not (a[1:] < a[:-1]).any())
        # All != except for +/-0
        assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
        assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
Project: pysapc    Author: bioinfocao    | Project source | File source
def denseToSparseAbvCutoff(self, denseMatrix, cutoff):
        """
        Remove data in denseMatrix below cutoff and convert the remaining data into a sparse matrix.
        Parameters:
        ----------------------
        denseMatrix: dense numpy matrix

        cutoff: int or float

        Returns
        ----------------------
        Scipy csr_matrix

        """
        maskArray=denseMatrix>=cutoff
        sparseMatrix=csr_matrix( (np.asarray(denseMatrix[maskArray]).reshape(-1),np.nonzero(maskArray)),\
                    shape=denseMatrix.shape)
        return sparseMatrix
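
A usage sketch (assuming numpy and scipy.sparse.csr_matrix are imported as in the class; sp is a hypothetical instance of the containing class):

dense = np.array([[0.2, 3.0],
                  [5.0, 0.1]])
sparse = sp.denseToSparseAbvCutoff(dense, 1.0)
print(sparse.toarray())  # [[0. 3.]
                         #  [5. 0.]]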
Project: house-of-enlightenment    Author: house-of-enlightenment    | Project source | File source
def get_starting_location(self):
        rows_with_points = self.hits.max(axis=1)
        max_row = np.nonzero(rows_with_points)[0].max()
        # TODO: if max_row == 215, we should exit
        row = self.hits[max_row, :]
        idx = np.nonzero(row)[0]
        col = np.random.choice(idx)
        col = np.random.randint(col - 2, col + 2)
        loc = [min(STATE.layout.rows - 1, max_row + 10), col]
        assert self.max_row <= max_row
        self.max_row = max_row
        return loc

    #def get_starting_location(self):
    #    rows_with_points = self.hits.max(axis=1)
    #    max_row = np.nonzero(rows_with_points)[0].max()
    #    loc = [min(STATE.layout.rows - 1, max_row + 10), np.random.randint(STATE.layout.columns)]
        #assert self.max_row <= max_row
        #self.max_row = max_row
        #return loc
Project: DEPICT    Author: herandy    | Project source | File source
def bestMap(L1, L2):
    if len(L1) != len(L2):
        print('size(L1) must == size(L2)')

    Label1 = np.unique(L1)
    nClass1 = len(Label1)
    Label2 = np.unique(L2)
    nClass2 = len(Label2)

    nClass = max(nClass1, nClass2)
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        for j in range(nClass2):
            G[i][j] = len(np.nonzero((L1 == Label1[i]) * (L2 == Label2[j]))[0])

    c = linear_assignment_.linear_assignment(-G.T)[:, 1]
    newL2 = np.zeros(len(L2))
    for i in range(nClass2):
        for j in np.nonzero(L2 == Label2[i])[0]:
            if len(Label1) > c[i]:
                newL2[j] = Label1[c[i]]

    return accuracy_score(L1, newL2)
Project: facade-segmentation    Author: jfemiani    | Project source | File source
def _vlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160, ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])

    angles = np.degrees(np.arccos(vecs[:, 0] / lengths))
    points = np.column_stack([ctrs[:, 0], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines
Project: facade-segmentation    Author: jfemiani    | Project source | File source
def _hlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160, ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])

    angles = np.degrees(np.arccos(vecs[:, 1] / lengths))
    points = np.column_stack([ctrs[:, 1], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines
Project: EZClimate    Author: Litterman    | Project source | File source
def _tipping_point_update(self, tmp, consump, peak_temp_interval=30.0):
        """Determine whether a tipping point has occurred, if so reduce consumption for 
        all periods after this date.
        """
        draws = tmp.shape[0]
        disaster = self._disaster_simulation()
        disaster_cons = self._disaster_cons_simulation()
        period_lengths = self.tree.decision_times[1:] - self.tree.decision_times[:-1]

        tmp_scale = np.maximum(self.peak_temp, tmp)
        ave_prob_of_survival = 1.0 - np.square(tmp / tmp_scale) 
        prob_of_survival = ave_prob_of_survival**(period_lengths / peak_temp_interval)
        # this part may be done better, this takes a long time to loop over
        res = prob_of_survival < disaster
        rows, cols = np.nonzero(res)
        row, count = np.unique(rows, return_counts=True)
        first_occurrence = zip(row, cols[np.insert(count.cumsum()[:-1], 0, 0)])
        for pos in first_occurrence:
            consump[pos[0], pos[1]:] *= np.exp(-disaster_cons[pos[0]])
        return consump
Project: matlab_imresize    Author: fatheral    | Project source | File source
def contributions(in_length, out_length, scale, kernel, k_width):
    if scale < 1:
        h = lambda x: scale * kernel(scale * x)
        kernel_width = 1.0 * k_width / scale
    else:
        h = kernel
        kernel_width = k_width
    x = np.arange(1, out_length+1).astype(np.float64)
    u = x / scale + 0.5 * (1 - 1 / scale)
    left = np.floor(u - kernel_width / 2)
    P = int(ceil(kernel_width)) + 2
    ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
    indices = ind.astype(np.int32)
    weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
    weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
    aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
    indices = aux[np.mod(indices, aux.size)]
    ind2store = np.nonzero(np.any(weights, axis=0))
    weights = weights[:, ind2store]
    indices = indices[:, ind2store]
    return weights, indices
Project: nmt    Author: westrik    | Project source | File source
def decode(self, sentence, src=True):
        '''
        Given an encoded sentence matrix,
        return the represented sentence string (tokenized).
        '''

        words = []

        for word in sentence:
            idxs = np.nonzero(word)[0]
            if len(idxs) > 1:
                raise Exception("Multiple hot bits on word vec")
            elif len(idxs) == 0:
                continue

            if src:
                words.append(self.words_src[0][idxs[0]])
            else:
                words.append(self.words_dst[0][idxs[0]])

        return ' '.join(words)
Project: nanopores    Author: mitschabaude    | Project source | File source
def move_ellipses(self, coll, cyl=False):
        xz = self.x[:, ::2] if not cyl else np.column_stack(
           [np.sqrt(np.sum(self.x[:, :2]**2, 1)), self.x[:, 2]])
        coll.set_offsets(xz)
        #inside = self.inside_wall()
        #margin = np.nonzero(self.alive)[0][self.inside_wall(2.)]
        colors = np.full((self.N,), "b", dtype=str)
        #colors[margin] = "r"
        colors[self.success] = "k"
        colors[self.fail] = "k"
        colors[self.alive & ~self.can_bind] = "r"
        #colors = [("r" if inside[i] else "g") if margin[i] else "b" for i in range(self.N)]
        coll.set_facecolors(colors)
        #y = self.x[:, 1]
        #d = 50.
        #sizes = self.params.rMolecule*(1. + y/d)
        #coll.set(widths=sizes, heights=sizes)
Project: nanopores    Author: mitschabaude    | Project source | File source
def set_free(self, uid=None):
        ''' reset status of occupied points to zero '''
        self.read()
        data = self.data

        # check for valid id
        if uid in [0,1]:
            print "Error: %s is not a valid ID, returning." %uid
            return None

        if uid is not None:
            # get indices of vertices by id
            vertices = np.nonzero(data["status"] == uid)[0]
        else:
            vertices = np.nonzero(np.logical_and(data["status"] != 0, data["status"] != 1))[0]

        # reset vertices to 0
        data["status"][vertices] = 0
        self.write()
Project: TensorFlow-ADGM    Author: dancsalo    | Project source | File source
def _split_data(self):
        counts = np.zeros(self._num_classes)
        labeled_indices = list()
        num_per_class = int(self._num_labels / self._num_classes)
        for i, l in enumerate(self._labels):
            index = np.nonzero(l)[0][0]
            if counts[index] < num_per_class:
                counts[index] += 1
                labeled_indices.append(i)
            elif counts.sum() == self._num_labels:
                break
            else:
                continue
        all_indices = set(range(self._num_train_images))
        unlabeled_indices = list(all_indices - set(labeled_indices))
        images_labeled = self._images[labeled_indices]
        images_unlabeled = self._images[unlabeled_indices]
        labels = self._labels[labeled_indices]
        return images_labeled, images_unlabeled, labels
Project: AbTextSumm    Author: StevenLOL    | Project source | File source
def jaccard(v1, v2):
    '''
    Due to the idiosyncrasies of my code the Jaccard index is a bit
    altered. The theory is the same but the implementation might look a bit
    odd. Instead of two vectors containing the words of each document, I have
    two equally sized vectors whose columns represent the words of the whole
    corpus: an entry is 1 if the word is present in the document and 0 if it
    is not. So first we find the indices of the words in each document, and
    then the Jaccard distance is calculated from those indices.
    '''  

    indices1 = numpy.nonzero(v1)[0].tolist()
    indices2 = numpy.nonzero(v2)[0].tolist()
    inter = len(set(indices1) & set(indices2))
    un = len(set(indices1) | set(indices2))
    dist = 1 - inter/float(un)
    return dist
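
A small worked example (assuming numpy is imported as in the snippet): two indicator vectors over a five-word vocabulary.

v1 = numpy.array([1, 0, 1, 1, 0])
v2 = numpy.array([1, 1, 0, 1, 0])
print(jaccard(v1, v2))  # intersection {0, 3}, union {0, 1, 2, 3} -> 1 - 2/4 = 0.5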
Project: CS-SMAF    Author: brian-cleary    | Project source | File source
def get_signature_genes(X,n,lda=10):
    W = np.zeros((X.shape[0],X.shape[0]))
    # coarse search from the bottom
    while (abs(W).sum(1) > 0).sum() < n:
        lda /= 10.
        model = MultiTaskLasso(alpha=lda,max_iter=100,tol=.001,selection='random',warm_start=True)
        model.fit(X.T,X.T)
        W = model.coef_.T
        #print len(np.nonzero(abs(W).sum(1))[0]),model.score(X.T,X.T)
    # fine search from the top
    while (abs(W).sum(1) > 0).sum() > n*1.2:
        lda *= 2.
        model.set_params(alpha=lda)
        model.fit(X.T,X.T)
        W = model.coef_.T
        #print len(np.nonzero(abs(W).sum(1))[0]),model.score(X.T,X.T)
    # finer search
    while (abs(W).sum(1) > 0).sum() > n:
        lda *= 1.1
        model.set_params(alpha=lda)
        model.fit(X.T,X.T)
        W = model.coef_.T
        #print len(np.nonzero(abs(W).sum(1))[0]),model.score(X.T,X.T)
    return np.nonzero(abs(W).sum(1))[0]
Project: Project101    Author: Wonjuseo    | Project source | File source
def rargmax(vector):
    # random argmax
    m = np.max(vector)
    indices = np.nonzero(vector == m)[0]
    return pr.choice(indices)

# Reward Update Q
# Algorithm
# For each s,a initialize table entry Q(s,a)<-0
# Observe the current state s
# Do forever:
# select an action a and execute it
# receive immediate reward
# observe the new state
# update the table entry for Q(s,a)
# update the state

# Non-deterministic environment
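
A quick check of the tie-breaking behavior (assuming numpy as np and pr bound to a module exposing choice, e.g. random):

q_row = np.array([0.0, 0.7, 0.7, 0.2])
print(rargmax(q_row))  # 1 or 2, chosen at random among the tied maxima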
Project: type2-fuzzy    Author: h4iku    | Project source | File source
def mg(x, xmf, umf=[0, 1, 1, 0]):
    """Function to compute the membership grades of each x on a T1 FS

    x: list of x values
    xmf: x parameters of the membership function
    umf: u parameters of the membership function
    """

    items = [item for item in sorted(zip(xmf, umf))]
    xmf = [i[0] for i in items]
    umf = [i[1] for i in items]

    u = [None] * len(x)  # membership grade of x
    for i, p in enumerate(x):
        if p <= xmf[0] or p >= xmf[-1]:
            u[i] = 0
        else:
            x_mf = np.array(xmf)
            left = np.nonzero(x_mf < p)[0][-1]
            right = left + 1
            u[i] = umf[left] + (umf[right] - umf[left]) * (p - xmf[left]) / (xmf[right] - xmf[left])

    return u
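
A usage sketch with the default trapezoid-like umf = [0, 1, 1, 0]:

print(mg([0.5, 1.5, 3.5], [0, 1, 2, 3]))  # -> [0.5, 1.0, 0]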
Project: arlpy    Author: org-arl    | Project source | File source
def ber(x, y, m=2):
    """Measure bit error rate between symbols in x and y.

    :param x: symbol array #1
    :param y: symbol array #2
    :param m: symbol alphabet size (maximum 64)
    :returns: bit error rate

    >>> import arlpy
    >>> arlpy.comms.ber([0,1,2,3], [0,1,2,2], m=4)
    0.125
    """
    x = _np.asarray(x, dtype=int)
    y = _np.asarray(y, dtype=int)
    if _np.any(x >= m) or _np.any(y >= m) or _np.any(x < 0) or _np.any(y < 0):
        raise ValueError('Invalid data for specified m')
    if m == 2:
        return ser(x, y)
    if m > _MAX_M:
        raise ValueError('m > %d not supported' % (_MAX_M))
    n = _np.prod(_np.shape(x))*_np.log2(m)
    e = x^y
    e = e[_np.nonzero(e)]
    e = _np.sum(_popcount[e])
    return float(e)/n
Project: quickshear    Author: nipy    | Project source | File source
def flip_axes(data, perms, flips):
    """ Flip a data array along specified axes

    Parameters
    ----------
    data : 3D array
    perms : (3,) sequence of ints
        Axis permutations to perform
    flips : (3,) sequence of bools
        Sequence of indicators for whether to flip along each axis

    Returns
    -------
    3D array
    """
    data = np.transpose(data, perms)
    for axis in np.nonzero(flips)[0]:
        data = nb.orientations.flip_axis(data, axis)
    return data
Project: isp-data-pollution    Author: essandess    | Project source | File source
def draw_links(self,n=1,log_sampling=False):
        """ Draw multiple random links. """
        urls = []
        domain_array = np.array([dmn for dmn in self.domain_links])
        domain_count = np.array([len(self.domain_links[domain_array[k]]) for k in range(domain_array.shape[0])])
        p = np.array([float(c) for c in domain_count])
        count_total = p.sum()
        if log_sampling:  # log-sampling [log(x+1)] to bias lower count domains
            p = np.fromiter((np.log1p(x) for x in p), dtype=p.dtype)
        if count_total > 0:
            p = p/p.sum()
            cnts = npr.multinomial(n, pvals=p)
            if n > 1:
                for k in range(cnts.shape[0]):
                    domain = domain_array[k]
                    cnt = min(cnts[k],domain_count[k])
                    for url in random.sample(self.domain_links[domain],cnt):
                        urls.append(url)
            else:
                k = int(np.nonzero(cnts)[0])
                domain = domain_array[k]
                url = random.sample(self.domain_links[domain],1)[0]
                urls.append(url)
        return urls
Project: sequence-based-recommendations    Author: rdevooght    | Project source | File source
def _get_features(self, item, user_id):
        '''Change a tuple (item_id, rating) into a list of features to feed into the RNN.
        Features have the following structure: [one_hot_encoding, personal_rating on a scale of ten, average_rating on a scale of ten, popularity on a log scale of ten]
        '''

        item_id, rating = item

        if self.use_movies_features:
            one_hot_encoding = np.zeros(self.n_items)
            one_hot_encoding[item_id] = 1

            return np.concatenate((one_hot_encoding, self._get_optional_features(item, user_id)))
        else:
            one_hot_encoding = [item_id]

            optional_features = self._get_optional_features(item, user_id)
            optional_features_ids = np.nonzero(optional_features)[0]

            return np.concatenate((one_hot_encoding, optional_features_ids + self.n_items))
Project: async-rl    Author: muupan    | Project source | File source
def _sample_discrete_actions(batch_probs):
    """Sample a batch of actions from a batch of action probabilities.

    Args:
      batch_probs (ndarray): batch of action probabilities BxA
    Returns:
      List consisting of sampled actions
    """
    action_indices = []

    # Subtract a tiny value from probabilities in order to avoid
    # "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
    batch_probs = batch_probs - np.finfo(np.float32).epsneg

    for i in range(batch_probs.shape[0]):
        histogram = np.random.multinomial(1, batch_probs[i])
        action_indices.append(int(np.nonzero(histogram)[0]))
    return action_indices
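
A usage sketch (assuming numpy is imported as np; the batch below is made up):

batch_probs = np.array([[0.1, 0.9],
                        [0.8, 0.2]], dtype=np.float32)
print(_sample_discrete_actions(batch_probs))  # e.g. [1, 0]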
Project: pytorch_fnet    Author: AllenCellModeling    | Project source | File source
def get_major_minor_axis(img):
    """
    Finds the major and minor axis as 3d vectors of the passed in image
    :param img: CZYX numpy array
    :return: tuple containing two numpy arrays representing the major and minor axis as 3d vectors
    """
    # do a mean projection if more than 3 axes
    if img.ndim > 3:
        z, y, x = np.nonzero(np.mean(img, axis=tuple(range(img.ndim - 3))))
    else:
        z, y, x = np.nonzero(img)
    coords = np.stack([x - np.mean(x), y - np.mean(y), z - np.mean(z)])
    # eigenvectors and values of the covariance matrix
    evals, evecs = np.linalg.eig(np.cov(coords))
    # return largest and smallest eigenvectors (major and minor axis)
    order = np.argsort(evals)
    return (evecs[:, order[-1]], evecs[:, order[0]])
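
A sanity check on synthetic data (a sketch, assuming numpy as np): a line of voxels along y should dominate the major axis.

img = np.zeros((1, 3, 9, 3))  # CZYX, as the docstring expects
img[0, 1, 1:8, 1] = 1
major, minor = get_major_minor_axis(img)
print(np.round(np.abs(major)))  # ~[0. 1. 0.], i.e. the y direction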
Project: AutoML5    Author: djajetic    | Project source | File source
def multilabel_to_multiclass (array):
    array = binarization(array)
    return np.array([np.nonzero(array[i, :])[0][0] for i in range(len(array))])
Project: AutoML5    Author: djajetic    | Project source | File source
def tp_filter(X, Y, feat_num=1000, verbose=True):
    ''' TP feature selection in the spirit of the winners of the KDD cup 2001
    Only for binary classification and sparse matrices'''

    if issparse(X) and len(Y.shape)==1  and len(set(Y))==2 and (sum(Y)/Y.shape[0])<0.1: 
        if verbose: print("========= Filtering features...")
        Posidx=Y>0
        #npos = sum(Posidx)
        #Negidx=Y<=0
        #nneg = sum(Negidx)

        nz=X.nonzero()
        mx=X[nz].max()
        if X[nz].min()==mx: # sparse binary
            if mx!=1: X[nz]=1
            tp=csr_matrix.sum(X[Posidx,:], axis=0)
            #fn=npos-tp
            #fp=csr_matrix.sum(X[Negidx,:], axis=0)
            #tn=nneg-fp
        else:
            tp=np.sum(X[Posidx,:]>0, axis=0)
            #tn=np.sum(X[Negidx,:]<=0, axis=0)
            #fn=np.sum(X[Posidx,:]<=0, axis=0)
            #fp=np.sum(X[Negidx,:]>0, axis=0)

        tp=np.ravel(tp)
        idx=sorted(range(len(tp)), key=tp.__getitem__, reverse=True)   
        return idx[0:feat_num]
    else:
        feat_num = X.shape[1]
        return range(feat_num)
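
A usage sketch (assuming scipy.sparse.csr_matrix, issparse and numpy are imported; the data is synthetic, with a rare positive class so the < 0.1 gate passes):

X = csr_matrix((np.random.RandomState(0).rand(100, 50) > 0.7).astype(float))
Y = np.zeros(100)
Y[:5] = 1
top = tp_filter(X, Y, feat_num=10)  # indices of the 10 highest-TP features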
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def callback_lasso(self, verts):
        p = mpl.path.Path(verts)
        in_selection = p.contains_points(self.lasso_selector.points)
        indices = np.nonzero(in_selection)[0]
        if len(self.lasso_selector.points) != len(self.points[1]):
            self.update_inspect(indices, self.lasso_selector.add_or_remove)
        else:
            self.update_inspect_template(indices, self.lasso_selector.add_or_remove)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def callback_rect(self, eclick, erelease):
        xmin, xmax, ymin, ymax = eclick.xdata, erelease.xdata, eclick.ydata, erelease.ydata
        if xmin > xmax:
            xmin, xmax = xmax, xmin
        if ymin > ymax:
            ymin, ymax = ymax, ymin

        self.score_ax = eclick.inaxes

        if self.score_ax == self.score_ax1:
            score_x, score_y = self.score_x, self.score_y
        elif self.score_ax == self.score_ax2:
            score_x, score_y = self.norms[self.to_consider], self.rates[self.to_consider]
        elif self.score_ax == self.score_ax3:
            score_x, score_y = self.score_z, self.score_y

        in_selection = ((score_x >= xmin) &
                        (score_x <= xmax) &
                        (score_y >= ymin) &
                        (score_y <= ymax))
        indices = np.nonzero(in_selection)[0]
        add_or_remove = None
        if erelease.key == 'shift':
            add_or_remove = 'add'
        elif erelease.key == 'control':
            add_or_remove = 'remove'

        if self.score_ax != self.score_ax2:
            self.update_inspect(indices, add_or_remove)
        else:
            self.update_inspect_template(indices, add_or_remove)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def callback_lasso(self, verts):
        p = mpl.path.Path(verts)
        in_selection = p.contains_points(self.lasso_selector.points)
        indices = np.nonzero(in_selection)[0]
        self.update_inspect(indices, self.lasso_selector.add_or_remove)
Project: squeezeDet-hand    Author: fyhtea    | Project source | File source
def filter_prediction(self, boxes, probs, cls_idx):
    """Filter bounding box predictions with probability threshold and
    non-maximum suppression.

    Args:
      boxes: array of [cx, cy, w, h].
      probs: array of probabilities
      cls_idx: array of class indices
    Returns:
      final_boxes: array of filtered bounding boxes.
      final_probs: array of filtered probabilities
      final_cls_idx: array of filtered class indices
    """
    mc = self.mc

    if mc.TOP_N_DETECTION < len(probs) and mc.TOP_N_DETECTION > 0:
      order = probs.argsort()[:-mc.TOP_N_DETECTION-1:-1]
      probs = probs[order]
      boxes = boxes[order]
      cls_idx = cls_idx[order]
    else:
      filtered_idx = np.nonzero(probs>mc.PROB_THRESH)[0]
      probs = probs[filtered_idx]
      boxes = boxes[filtered_idx]
      cls_idx = cls_idx[filtered_idx]

    final_boxes = []
    final_probs = []
    final_cls_idx = []

    for c in range(mc.CLASSES):
      idx_per_class = [i for i in range(len(probs)) if cls_idx[i] == c]
      keep = util.nms(boxes[idx_per_class], probs[idx_per_class], mc.NMS_THRESH)
      for i in range(len(keep)):
        if keep[i]:
          final_boxes.append(boxes[idx_per_class[i]])
          final_probs.append(probs[idx_per_class[i]])
          final_cls_idx.append(c)
    return final_boxes, final_probs, final_cls_idx
Project: youtube-8m    Author: wangheda    | Project source | File source
def write_to_record(id_batch, label_batch, predictions, filenum, num_examples_processed):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_dir + '/' + 'predictions-%03d.tfrecord' % filenum)
    for i in range(num_examples_processed):
        video_id = id_batch[i]
        label = np.nonzero(label_batch[i,:])[0]
        example = get_output_feature(video_id, label, [predictions[i,:]], ['predictions'])
        serialized = example.SerializeToString()
        writer.write(serialized)
    writer.close()
Project: youtube-8m    Author: wangheda    | Project source | File source
def write_to_record(id_batch, label_batch, input_batch, predictions, filenum, num_examples_processed):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_dir + '/' + 'predictions-%03d.tfrecord' % filenum)
    for i in range(num_examples_processed):
        video_id = id_batch[i]
        label = np.nonzero(label_batch[i,:])[0]
        example = get_output_feature(video_id, label, [predictions[i,:]], ['predictions'])
        serialized = example.SerializeToString()
        writer.write(serialized)
    writer.close()
Project: youtube-8m    Author: wangheda    | Project source | File source
def write_to_record(id_batch, label_batch, predictions, filenum, num_examples_processed):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_dir + '/' + 'predictions-%04d.tfrecord' % filenum)
    for i in range(num_examples_processed):
        video_id = id_batch[i]
        label = np.nonzero(label_batch[i,:])[0]
        example = get_output_feature(video_id, label, [predictions[i,:]], ['predictions'])
        serialized = example.SerializeToString()
        writer.write(serialized)
    writer.close()
Project: youtube-8m    Author: wangheda    | Project source | File source
def write_to_record(id_batch, label_batch, input_batch, filenum, num_examples_processed):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_dir + '/' + 'predictions-%04d.tfrecord' % filenum)
    for i in range(num_examples_processed):
        video_id = id_batch[i]
        label = np.nonzero(label_batch[i,:])[0]
        example = get_output_feature(video_id, label, [input_batch[i,:]], ['input'])
        serialized = example.SerializeToString()
        writer.write(serialized)
    writer.close()