Python numpy module: ndenumerate() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.ndenumerate().
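For reference, np.ndenumerate(arr) walks an array in C (row-major) order and yields (index_tuple, value) pairs; every snippet below builds on this pattern. A minimal standalone example:

import numpy as np

a = np.array([[1, 2], [3, 4]])
for index, value in np.ndenumerate(a):
    print(index, value)
# (0, 0) 1
# (0, 1) 2
# (1, 0) 3
# (1, 1) 4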

Project: PersonalizedMultitaskLearning | Author: mitmedialab | project source | file source
def saveHintonPlot(self, matrix, num_tests, max_weight=None, ax=None):
        """Draw Hinton diagram for visualizing a weight matrix."""
        fig,ax = plt.subplots(1,1)

        if not max_weight:
            max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))

        ax.patch.set_facecolor('gray')
        ax.set_aspect('equal', 'box')
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())

        for (x, y), w in np.ndenumerate(matrix):
            color = 'white' if w > 0 else 'black'
            size = np.sqrt(np.abs(0.5*w/num_tests)) # Need to scale so that it is between 0 and 0.5
            rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                                 facecolor=color, edgecolor=color)
            ax.add_patch(rect)

        ax.autoscale_view()
        ax.invert_yaxis()
        plt.savefig(self.figures_path + self.save_prefix + '-Hinton.eps')
        plt.close()
Project: MatchZoo | Author: faneshion | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        d1len = len(self.data1[t1])
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[self.data1[t1]]
            t2_rep = self.embed[self.data2[t2]]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo | Author: faneshion | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo | Author: faneshion | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            curr_pair_feats = list(self.hist_feats[(t1, t2)])
            caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo | Author: faneshion | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo | Author: faneshion | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            curr_pair_feats = list(self.hist_feats[(t1, t2)])
            caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: fem | Author: mlp6 | project source | file source
def write_pml_elems(sorted_pml_elems, pmlfile="elems_pml.dyn"):
    """Create a new elements file that the PML elements.

    :param sorted_pml_elems:
    :param pmlfile: default = elems_pml.dyn
    :returns:
    """
    from numpy import ndenumerate

    pml = open(pmlfile, 'w')
    pml.write('$ PML elements generated by bc.py\n')
    pml.write('*ELEMENT_SOLID\n')
    for i, e in ndenumerate(sorted_pml_elems):
        pml.write('%i,%i,%i,%i,%i,%i,%i,%i,%i,%i\n' % (e['id'], e['pid'],
                                                       e['n1'], e['n2'],
                                                       e['n3'], e['n4'],
                                                       e['n5'], e['n6'],
                                                       e['n7'], e['n8']))
    pml.write('*END\n')
    pml.close()

    return 0
Project: dataset-shift-osdc16 | Author: pprett | project source | file source
def generate_data(sample_size=200, pd=[[0.4, 0.4], [0.1, 0.1]]):
    pd = np.array(pd)
    pd /= pd.sum()
    offset = 50
    bins = np.r_[np.zeros((1,)), np.cumsum(pd)]
    bin_counts = np.histogram(np.random.rand(sample_size), bins)[0]
    data = np.empty((0, 2))
    targets = []
    for ((i, j), p), count in zip(np.ndenumerate(pd), bin_counts):
        xs = np.random.uniform(low=0.0, high=50.0, size=count) + j * offset
        ys = np.random.uniform(low=0.0, high=50.0, size=count) + -i * offset
        data = np.vstack((data, np.c_[xs, ys]))
        if i == j:
            targets.extend([1] * count)
        else:
            targets.extend([-1] * count)
    return np.c_[data, targets]
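A brief usage sketch for generate_data above (assuming the function and its numpy import are in scope): the first two columns are the 2-D features, the last column is the +1/-1 target.

sample = generate_data(sample_size=500)
X, y = sample[:, :2], sample[:, 2]
print(X.shape, y.shape)  # (500, 2) (500,)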
Project: NADE | Author: MarcCote | project source | file source
def finite_diff_gradients(self, f, delta=1e-6):
        """
        f is called without arguments; the changes in the parameters happen as a side effect
        """
        gradients = dict()
        fx = f()
        for p in self.parameters_to_optimise:
            original = self.parameters[p].get_value()
            grad = np.zeros_like(original)
            if np.prod(original.shape) > 1:
                for index, _ in np.ndenumerate(original):
                    xh = original.copy()
                    xh[index] += delta
                    self.parameters[p].set_value(xh)
                    grad[index] = (f() - fx) / delta
                    self.parameters[p].set_value(original)
            else:
                xh = original.copy()
                xh += delta
                self.parameters[p].set_value(xh)
                grad = (f() - fx) / delta
                self.parameters[p].set_value(original)
            gradients[p] = grad
        return gradients
Project: Aurora | Author: upul | project source | file source
def test_reduce_sum_axis_zero():
    ctx = ndarray.gpu(0)
    shape = (500, 200, 100)
    to_shape = (200, 100)
    x = np.random.uniform(0, 20, shape).astype(np.float32)
    arr_x = ndarray.array(x, ctx=ctx)
    arr_y = ndarray.empty(to_shape, ctx=ctx)
    gpu_op.reduce_sum_axis_zero(arr_x, arr_y)
    y = arr_y.asnumpy()
    y_ = np.sum(x, axis=0)
    for index, _ in np.ndenumerate(y):
        v = y[index]
        v_ = y_[index]
        if abs((v - v_) / v_) > 1e-4:
            print(index, v, v_)
    np.testing.assert_allclose(np.sum(x, axis=0), y, rtol=1e-5)
Project: heliopy | Author: heliopython | project source | file source
def _dateToISO(indict):
    """
    convert datetimes to ISO strings inside of datamodel attributes
    """
    retdict = dmcopy(indict)
    if isinstance(indict, dict):
        for key in indict:
            if isinstance(indict[key], datetime.datetime):
                retdict[key] = retdict[key].isoformat()
            elif hasattr(indict[key], '__iter__'):
                for idx, el in enumerate(indict[key]):
                    if isinstance(el, datetime.datetime):
                        retdict[key][idx] = el.isoformat()
    else:
        if isinstance(indict, datetime.datetime):
            retdict = retdict.isoformat()
        elif hasattr(indict, '__iter__'):
            retdict = numpy.asanyarray(indict)
            for idx, el in numpy.ndenumerate(indict):
                if isinstance(el, datetime.datetime):
                    retdict[idx] = el.isoformat()
    return retdict
Project: PyBloqs | Author: manahl | project source | file source
def _flatten_data(data, chart_cfg, switch_zy=False):
        plot_axes_def = [(0, XAxis), (1, YAxis)]

        # Inject categories into the axis definitions of the plot
        if isinstance(data, NDFrame):
            for i, plot_axis in plot_axes_def[:data.ndim]:
                categories = data.axes[i]
                # Skip numeric indices
                if not categories.is_numeric():
                    chart_cfg = chart_cfg.inherit_many(plot_axis(categories=list(categories)))

        data = [list(index) + [value] for index, value in list(np.ndenumerate(data))]

        if switch_zy:
            for i in xrange(len(data)):
                tmp = data[i][-1]
                data[i][-1] = data[i][-2]
                data[i][-2] = tmp

        return data, chart_cfg
Project: spectroscopy | Author: jgoodknight | project source | file source
def returnAmplitudeFromListOfFunctionValues(self, listOfFunctionValues, additive=False):
        """Helper function for the amplitude setting function
        [f(x), g(y), ...]
        additive=True  => F(x, y, ...) = f(x) + g(y) + ...
        additive=False => F(x, y, ...) = f(x) * g(y) * ..."""
        output = self.functionSpaceZero()
        if additive:
            initialValue = 0.0
        else:
            initialValue = 1.0
        for indexTuple, value in np.ndenumerate(output):
            newValue = initialValue
            for tupleIndex, tupleValue in enumerate(indexTuple):
                if additive:
                    newValue += listOfFunctionValues[tupleIndex][tupleValue]
                else:
                    newValue *= listOfFunctionValues[tupleIndex][tupleValue]
            output[indexTuple] = newValue
        return output
Project: CNN-Glasses-Remover | Author: JubilantJerry | project source | file source
def generate(width, height, s, output):
    canvas = np.zeros((height, width), dtype='float64')
    max_d = math.sqrt(width**2 + height**2) / 2
    offset_angular = 0
    if (width >= height):
        offset_angular = math.pi / 4
    for (i, j), _ in np.ndenumerate(canvas):
        y = height // 2 - i
        x = j - width // 2
        d = math.sqrt(x**2 + y**2)
        t = math.atan2(y, x)
        canvas[i,j] = (255 / 4) * \
            (2 + radial_sin(d, s, t) + angular_sin (
                d, t, max_d, s, offset_angular))
    f = open(output, 'wb')
    w = png.Writer(width, height, greyscale=True)
    w.write(f, canvas)
    f.close()
Project: pymake | Author: dtrckd | project source | file source
def sample(self):
        self._update_m()

        indices = np.ndenumerate(self.count_k_by_j)

        lgg.debug('Sample m...')
        for ind in indices:
            j, k = ind[0]
            count = ind[1]

            if count > 0:
                # Sample number of tables in j serving dish k
                params = self.prob_jk(j, k)
                sample = categorical(params) + 1
            else:
                sample = 0

            self.m[j, k] = sample

        self.m_dotk = self.m.sum(0)
        self.purge_empty_tables()

        return self.m
Project: wrf-python | Author: NCAR | project source | file source
def cartopy_xlim(self, geobounds):
        """Return the x extents in projected coordinates for cartopy.

        Returns:

            :obj:`list`: A pair of [xmin, xmax].

        See Also:

            :mod:`cartopy`, :mod:`matplotlib`

        """
        try:
            _ = len(geobounds)
        except TypeError:
            x_extents= self._cart_extents(geobounds)[0]
        else:
            extents = self._cart_extents(geobounds)
            x_extents = np.empty(extents.shape, np.object)

            for idxs, extent in np.ndenumerate(extents):
                x_extents[idxs] = extent[0]

        return x_extents
Project: wrf-python | Author: NCAR | project source | file source
def cartopy_ylim(self, geobounds):
        """Return the y extents in projected coordinates for cartopy.

        Returns:

            :obj:`list`: A pair of [ymin, ymax].

        See Also:

            :mod:`cartopy`, :mod:`matplotlib`

        """
        try:
            _ = len(geobounds)
        except TypeError:
            y_extents= self._cart_extents(geobounds)[1]
        else:
            extents = self._cart_extents(geobounds)
            y_extents = np.empty(extents.shape, np.object)

            for idxs, extent in np.ndenumerate(extents):
                y_extents[idxs] = extent[1]

        return y_extents
Project: mtcnn-caffe | Author: CongWeilin | project source | file source
def detect_face_12net(cls_prob,roi,out_side,scale,width,height,threshold):
    in_side = 2*out_side+11
    stride = 0
    if out_side != 1:
        stride = float(in_side-12)/(out_side-1)
    boundingBox = []

    for (x,y), prob in np.ndenumerate(cls_prob):
        if(prob >= threshold):
            original_x1 = int((stride*x + 1)*scale)
            original_y1 = int((stride*y + 1)*scale)
            original_w  = int((12.0 -1)*scale)
            original_h  = int((12.0 -1)*scale)
            original_x2 = original_x1 + original_w
            original_y2 = original_y1 + original_h
            rect = []
            x1 = int(round(max(0     , original_x1 + original_w * roi[0][x][y])))
            y1 = int(round(max(0     , original_y1 + original_h * roi[1][x][y])))
            x2 = int(round(min(width , original_x2 + original_w * roi[2][x][y])))
            y2 = int(round(min(height, original_y2 + original_h * roi[3][x][y])))
            if x2 > x1 and y2 > y1:
                rect = [x1, y1, x2, y2, prob]
                boundingBox.append(rect)
    return NMS(boundingBox,0.5,'iou')
Project: kvae | Author: simonkamronn | project source | file source
def hinton(matrix, max_weight=None, ax=None):
    """Draw Hinton diagram for visualizing a weight matrix."""
    ax = ax if ax is not None else plt.gca()

    if not max_weight:
        max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))

    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())

    for (x, y), w in np.ndenumerate(matrix):
        color = 'white' if w > 0 else 'black'
        size = np.sqrt(np.abs(w) / max_weight)
        rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
                             facecolor=color, edgecolor=color)
        ax.add_patch(rect)

    ax.autoscale_view()
    ax.invert_yaxis()
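A possible way to drive the standalone hinton() above (assuming it is in scope and matplotlib is available): positive weights are drawn as white squares, negative as black, with square area proportional to the weight magnitude.

import numpy as np
import matplotlib.pyplot as plt

weights = np.random.randn(10, 10)
hinton(weights)   # draws onto the current axes
plt.show()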
Project: siHMM | Author: Ardavans | project source | file source
def sample_crp_tablecounts(concentration,customers,colweights):
    m = np.zeros_like(customers)
    tot = customers.sum()
    randseq = np.random.random(tot)

    starts = np.empty_like(customers)
    starts[0,0] = 0
    starts.flat[1:] = np.cumsum(np.ravel(customers)[:customers.size-1])

    for (i,j), n in np.ndenumerate(customers):
        w = colweights[j]
        for k in xrange(n):
            m[i,j] += randseq[starts[i,j]+k] \
                    < (concentration * w) / (k + concentration * w)

    return m

### Entropy
Project: siHMM | Author: Ardavans | project source | file source
def sample_crp_tablecounts(concentration,customers,colweights):
    m = np.zeros_like(customers)
    tot = customers.sum()
    randseq = np.random.random(tot)

    starts = np.empty_like(customers)
    starts[0,0] = 0
    starts.flat[1:] = np.cumsum(np.ravel(customers)[:customers.size-1])

    for (i,j), n in np.ndenumerate(customers):
        w = colweights[j]
        for k in xrange(n):
            m[i,j] += randseq[starts[i,j]+k] \
                    < (concentration * w) / (k + concentration * w)

    return m

### Entropy
Project: siHMM | Author: Ardavans | project source | file source
def hmm_trans_matrix(self):
        # NOTE: more general version, allows different delays, o/w we could
        # construct with np.kron
        if self._hmm_trans_matrix is None:
            ps, delays = map(np.array,zip(*[(d.p,d.delay) for d in self.dur_distns]))
            starts, ends = cumsum(delays,strict=True), cumsum(delays,strict=False)
            trans_matrix = self._hmm_trans_matrix = np.zeros((ends[-1],ends[-1]))

            for (i,j), Aij in np.ndenumerate(self.trans_matrix):
                block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
                if i == j:
                    block[:-1,1:] = np.eye(block.shape[0]-1)
                    block[-1,-1] = 1-ps[i]
                else:
                    block[-1,0] = ps[j]*Aij

        return self._hmm_trans_matrix
Project: siHMM | Author: Ardavans | project source | file source
def mf_bwd_trans_matrix(self):
        rs = self.rs
        starts, ends = cumsum(rs,strict=True), cumsum(rs,strict=False)
        trans_matrix = np.zeros((ends[-1],ends[-1]))

        Elnps, Eln1mps = zip(*[d._fixedr_distns[d.ridx]._mf_expected_statistics() for d in self.dur_distns])
        Eps, E1mps = np.exp(Elnps), np.exp(Eln1mps) # NOTE: actually exp(E[ln(p)]) etc

        enters = self.mf_bwd_enter_rows(rs,Eps,E1mps)
        for (i,j), Aij in np.ndenumerate(self.mf_trans_matrix):
            block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]
            block[-1,:] = Aij * E1mps[i] * enters[j]
            if i == j:
                block[...] += np.diag(np.repeat(Eps[i],rs[i])) \
                        + np.diag(np.repeat(E1mps[i],rs[i]-1),k=1)

        assert np.all(trans_matrix >= 0)
        return trans_matrix
Project: siHMM | Author: Ardavans | project source | file source
def hmm_trans_matrix_orig(self):
        rs, ps, delays = self.rs, self.ps, self.delays
        starts, ends = cumsum(rs+delays,strict=True), cumsum(rs+delays,strict=False)
        trans_matrix = np.zeros((ends[-1],ends[-1]))

        enters = self.bwd_enter_rows
        for (i,j), Aij in np.ndenumerate(self.trans_matrix):
            block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]

            if delays[i] == 0:
                block[-1,:rs[j]] = Aij * enters[j] * (1-ps[i])
            else:
                block[-1,:rs[j]] = Aij * enters[j]

            if i == j:
                block[:rs[i],:rs[i]] += \
                    np.diag(np.repeat(ps[i],rs[i])) + np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
                if delays[i] > 0:
                    block[rs[i]-1,rs[i]] = (1-ps[i])
                    block[rs[i]:,rs[i]:] = np.eye(delays[i],k=1)

        assert np.allclose(trans_matrix.sum(1),1.)
        return trans_matrix
Project: siHMM | Author: Ardavans | project source | file source
def hmm_trans_matrix_1(self):
        rs, ps, delays = self.rs, self.ps, self.delays
        starts, ends = cumsum(rs+delays,strict=True), cumsum(rs+delays,strict=False)
        trans_matrix = np.zeros((ends[-1],ends[-1]))

        enters = self.bwd_enter_rows
        for (i,j), Aij in np.ndenumerate(self.trans_matrix):
            block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]

            block[-1,:rs[j]] = Aij * enters[j] * (1-ps[i])

            if i == j:
                block[-rs[i]:,-rs[i]:] += \
                    np.diag(np.repeat(ps[i],rs[i])) + np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
                if delays[i] > 0:
                    block[:delays[i]:,:delays[i]] = np.eye(delays[i],k=1)
                    block[delays[i]-1,delays[i]] = 1

        assert np.allclose(trans_matrix.sum(1),1.)
        return trans_matrix
Project: siHMM | Author: Ardavans | project source | file source
def hmm_trans_matrix_2(self):
        rs, ps, delays = self.rs, self.ps, self.delays
        starts, ends = cumsum(rs+delays,strict=True), cumsum(rs+delays,strict=False)
        trans_matrix = np.zeros((ends[-1],ends[-1]))

        enters = self.bwd_enter_rows
        for (i,j), Aij in np.ndenumerate(self.trans_matrix):
            block = trans_matrix[starts[i]:ends[i],starts[j]:ends[j]]

            block[-1,0] = Aij * (1-ps[i])

            if i == j:
                block[-rs[i]:,-rs[i]:] += \
                    np.diag(np.repeat(ps[i],rs[i])) + np.diag(np.repeat(1-ps[i],rs[i]-1),k=1)
                if delays[i] > 0:
                    block[:delays[i]:,:delays[i]] = np.eye(delays[i],k=1)
                    block[delays[i]-1,-rs[i]:] = enters[i]

        assert np.allclose(trans_matrix.sum(1),1.)
        return trans_matrix
Project: cellranger | Author: 10XGenomics | project source | file source
def convert_numpy_array_to_line_chart(array, ntype):
    array = np.sort(array)[::-1]

    rows = []
    previous_count = None
    for (index,), count in np.ndenumerate(array):
        if index == 0 or index == len(array)-1:
            rows.append([index, ntype(count)])
        elif previous_count != count:
            previous_index = rows[-1][0]
            if previous_index != index - 1:
                rows.append([index - 1, ntype(previous_count)])
            rows.append([index, ntype(count)])
        previous_count = count
    return rows
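A small usage sketch for the function above (assuming it is in scope): the array is sorted in descending order and only the indices where the value changes, plus the first and last index, are kept as [index, value] breakpoints.

import numpy as np

counts = np.array([5, 3, 5, 1, 3, 5])
print(convert_numpy_array_to_line_chart(counts, int))
# [[0, 5], [2, 5], [3, 3], [5, 1]]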
Project: MatchZoo | Author: faneshion | project source | file source
def cal_hist(t1_rep, t2_rep, qnum, hist_size):
    #qnum = len(t1_rep)
    mhist = np.zeros((qnum, hist_size), dtype=np.float32)
    mm = t1_rep.dot(np.transpose(t2_rep))
    for (i,j), v in np.ndenumerate(mm):
        if i >= qnum:
            break
        vid = int((v + 1.) / 2. * (hist_size - 1.))
        mhist[i][vid] += 1.
    mhist += 1.
    mhist = np.log10(mhist)
    return mhist.flatten()
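A minimal driver for the standalone cal_hist above (assuming it is in scope), using row-wise L2-normalized random embeddings so the dot products are cosine similarities in [-1, 1], which is the range the binning expects.

import numpy as np

rng = np.random.RandomState(0)
q_rep = rng.randn(5, 50)
d_rep = rng.randn(20, 50)
q_rep /= np.linalg.norm(q_rep, axis=1, keepdims=True)
d_rep /= np.linalg.norm(d_rep, axis=1, keepdims=True)

hist = cal_hist(q_rep, d_rep, qnum=5, hist_size=30)
print(hist.shape)  # (150,): 5 query terms x 30 bins, flattened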
Project: MatchZoo | Author: faneshion | project source | file source
def cal_binsum(t1_rep, t2_rep, qnum, bin_num):
    mbinsum = np.zeros((qnum, bin_num), dtype=np.float32)
    mm = t1_rep.dot(np.transpose(t2_rep))
    for (i, j), v in np.ndenumerate(mm):
        if i >= qnum:
            break
        vid = int((v + 1.) / 2. * (bin_num - 1.))
        mbinsum[i][vid] += v
    #mhist += 1. # smooth is not needed for computing bin sum
    #mhist = np.log10(mhist) # not needed for computing  bin sum
    return mbinsum.flatten()
Project: MatchZoo | Author: faneshion | project source | file source
def cal_binsum(t1_rep, t2_rep, qnum, bin_num):
    mbinsum = np.zeros((qnum, bin_num), dtype=np.float32)
    mm = t1_rep.dot(np.transpose(t2_rep))
    for (i, j), v in np.ndenumerate(mm):
        if i >= qnum:
            break
        vid = int((v + 1.) / 2. * (bin_num - 1.))
        mbinsum[i][vid] += v
    #mhist += 1. # smooth is not needed for computing bin sum
    #mhist = np.log10(mhist) # not needed for computing  bin sum
    return mbinsum.flatten()
Project: fem | Author: mlp6 | project source | file source
def assign_node_constraints(snic, axes, face_constraints):
    """assign node constraints to prescribed node planes

    Nodes shared on multiple faces are assigned with the following order
    of precedence: z, y, x

    :param snic: sorted node IDs and coordinates from nodes.dyn
    :param axes: mesh axes [x, y, z]
    :param face_constraints: list of DOF strings ordered by
                             ((xmin, xmax), (ymin, ymax), (zmin, zmax))
                             (e.g., (('1,1,1,1,1,1', '0,1,0,0,1,0'), ...))
    :return: bcdict - dictionary of node BC to be written to bc.dyn
    """
    from fem_mesh import extractPlane
    from numpy import ndenumerate

    bcdict = {}
    for axis in range(0, 3):
        for axlim in range(0, 2):
            if axlim == 0:
                axis_limit = axes[axis].min()
            else:
                axis_limit = axes[axis].max()
            planeNodeIDs = extractPlane(snic, axes, (axis, axis_limit))
            for i, id in ndenumerate(planeNodeIDs):
                bcdict[id] = face_constraints[axis][axlim]

    return bcdict
Project: fem | Author: mlp6 | project source | file source
def constrain_sym_pml_nodes(bcdict, snic, axes, pml_elems, edge_constraints):
    """make sure that all "side" nodes for the PML elements are fully
    constrained, instead of being assigned the symmetry constraints

    THIS FUNCTION IS NOT NEEDED!!

    :param bcdict:
    :param snic:
    :param axes:
    :param pml_elems:
    :param edge_constraints:
    :return: bcdict
    """
    from fem_mesh import extractPlane
    from numpy import ndenumerate

    # look for x symmetry face
    axis_limit = None
    for axis in range(0, 2):
        if edge_constraints[0][axis][0]:
            axis_limit = axes[axis].min()
        elif edge_constraints[0][axis][1]:
            axis_limit = axes[axis].max()
        if axis_limit is not None:
            planeNodeIDs = extractPlane(snic, axes, (axis, axis_limit))
            pml_node_ids_zmin = planeNodeIDs[:, 0:(pml_elems[2][0] + 1)]
            pml_node_ids_zmax = planeNodeIDs[:, -(pml_elems[2][1] + 1):]
            for i, id in ndenumerate(pml_node_ids_zmin):
                bcdict[id] = "%s" % '1,1,1,1,1,1'
            for i, id in ndenumerate(pml_node_ids_zmax):
                bcdict[id] = "%s" % '1,1,1,1,1,1'
        axis_limit = None

    return bcdict
Project: fem | Author: mlp6 | project source | file source
def create_zdisp(nodeidlist, disp_slice_z_only, zdisp):
    """create zdisp array from squeezed disp_slice at appropriate index

    :param nodeidlist: first column of disp_slice with node IDs in row order
    :param disp_slice_z_only: squeezed disp_slice of just zdisp
    :returns: zdisp -- array of z-disp in rows corresponding to node ID
                       (for fast read access)

    """
    import numpy as np

    for i, nodeid in np.ndenumerate(nodeidlist):
        zdisp[nodeid] = disp_slice_z_only[i]

    return zdisp
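A tiny example for create_zdisp above (assuming it is in scope): node IDs from the first argument index directly into the preallocated zdisp array.

import numpy as np

nodeidlist = np.array([3, 1, 2])         # node IDs in row order
disp_z = np.array([0.10, 0.25, -0.05])   # z-displacement per row
zdisp = np.zeros(5)                      # indexed by node ID
print(create_zdisp(nodeidlist, disp_z, zdisp))
# [ 0.    0.25 -0.05  0.1   0.  ]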
Project: radar | Author: amoose136 | project source | file source
def test_ndenumerate_crash(self):
        # Ticket 1140
        # Shouldn't crash:
        list(np.ndenumerate(np.array([[]])))
Project: MuGo | Author: brilee | project source | file source
def expand(self, move_probabilities):
        self.children = {move: MCTSNode(self, move, prob)
            for move, prob in np.ndenumerate(move_probabilities)}
        # Pass should always be an option! Say, for example, seki.
        self.children[None] = MCTSNode(self, None, 0)
Project: NYUD-FCN8s | Author: yxliwhu | project source | file source
def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        Shift labels so that classes are 0-39 and void is 255 (to ignore it).
        The leading singleton dimension is required by the loss.
        """
        label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['groundTruth'][0,0][0,0]['SegmentationClass'].astype(np.uint16)
        for (x,y), value in np.ndenumerate(label):
            label[x,y] = self.class_map[0][value-1]
        label = label.astype(np.uint8)
        label -= 1  # rotate labels
        label = label[np.newaxis, ...]
        # pdb.set_trace()
        return label
Project: NYUD-FCN8s | Author: yxliwhu | project source | file source
def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        Shift labels so that classes are 0-39 and void is 255 (to ignore it).
        The leading singleton dimension is required by the loss.
        """
        label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['groundTruth'][0,0][0,0]['SegmentationClass'].astype(np.uint16)
        for (x,y), value in np.ndenumerate(label):
            label[x,y] = self.class_map[0][value-1]
        label = label.astype(np.uint8)
        label -= 1  # rotate labels
        label = label[np.newaxis, ...]
        # pdb.set_trace()
        return label
Project: NYUD-FCN8s | Author: yxliwhu | project source | file source
def load_label(self, idx):
        """
        Load label image as 1 x height x width integer array of label indices.
        Shift labels so that classes are 0-39 and void is 255 (to ignore it).
        The leading singleton dimension is required by the loss.
        """
        label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['groundTruth'][0,0][0,0]['SegmentationClass'].astype(np.uint16)
        for (x,y), value in np.ndenumerate(label):
            label[x,y] = self.class_map[0][value-1]
        label = label.astype(np.uint8)
        label -= 1  # rotate labels
        label = label[np.newaxis, ...]
        # pdb.set_trace()
        return label
Project: dataset | Author: analysiscenter | project source | file source
def build_from_index(self, index, paths, dirs):
        """ Build index from another index for indices given. """
        if isinstance(paths, dict):
            self._paths = dict((file, paths[file]) for file in index)
        else:
            self._paths = dict((file, paths[pos]) for pos, file in np.ndenumerate(index))
        self.dirs = dirs
        return index
Project: krpcScripts | Author: jwvanderbeck | project source | file source
def test_ndenumerate_crash(self):
        # Ticket 1140
        # Shouldn't crash:
        list(np.ndenumerate(np.array([[]])))
Project: Word2Vec | Author: hashbangCoder | project source | file source
def analyze_false(validData,validDataNumbers,validLabels,model):    
    """Calculating precision and recall for best model..."""
    predictions = np.squeeze((model.predict(validDataNumbers) > 0.5).astype('int32'))
    c1_inds = np.where(validLabels == 1)[0]
    pos_inds = np.where((predictions+validLabels) == 2)[0] #np.squeeze(predictions) == validLabels
    neg_inds = np.setdiff1d(c1_inds,pos_inds)
    seq_lengths = np.zeros((validData.shape[0]))
    for ind,row in np.ndenumerate(validData):
            seq_lengths[ind] = len(wordpunct_tokenize(row.lower().strip())) 

    mean_true_length = np.mean(seq_lengths[pos_inds])   
    mean_false_length = np.mean(seq_lengths[neg_inds])

    return mean_false_length,mean_true_length
Project: ome-files-py | Author: ome | project source | file source
def rgb_pixeldata(self):
    pixels = [np.ndarray(shape=[64, 128], dtype=np.dtype('u1'), order='C'),
              np.ndarray(shape=[64, 128], dtype=np.dtype('u1'), order='C'),
              np.ndarray(shape=[64, 128], dtype=np.dtype('u1'), order='C')]
    for s in range(0, len(pixels)):
        for (y, x), value in np.ndenumerate(pixels[s]):
            if s == 0:
                value = (x * 255) / 128
            if s == 1:
                value = (y * 255) / 64
            if s == 2:
                value = 255 - ((y * 255) / 64)
            pixels[s].itemset((y, x), value)
    return pixels
Project: ome-files-py | Author: ome | project source | file source
def grey_pixeldata(self):
    pixels = np.ndarray(shape=[64, 128], dtype=np.dtype('f4'), order='C')
    for (y, x), value in np.ndenumerate(pixels):
        value = x/128.0 + y/64.0
        pixels.itemset((y, x), value)
    return pixels
Project: python3-utils | Author: soldni | project source | file source
def argmin_n(m, n):
    best_values = []
    best_index = []
    max_value_heap = []

    for index, value in np.ndenumerate(m):

        if len(best_values) == n:

            if -1 * value < max_value_heap[0][0]:
                # value is larger than the largest value
                # and the list is at capacity
                continue

            _, pos = heapq.heappop(max_value_heap)
            best_values[pos] = value
            best_index[pos] = index
            heapq.heappush(max_value_heap, (-1 * value, pos))
        else:
            heapq.heappush(max_value_heap, (-1 * value, len(best_values)))
            best_values.append(value)
            best_index.append(index)

    pos, best_values = zip(*sorted(enumerate(best_values), key=lambda e: e[1]))
    best_index = [best_index[i] for i in pos]
    return best_index
Project: python3-utils | Author: soldni | project source | file source
def argmax_n(m, n):
    best_values = []
    best_index = []
    max_value_heap = []

    for index, value in np.ndenumerate(m):

        if len(best_values) == n:

            if value < max_value_heap[0][0]:
                # value is smaller than the largest value
                # and the list is at capacity
                continue

            _, pos = heapq.heappop(max_value_heap)
            best_values[pos] = value
            best_index[pos] = index
            heapq.heappush(max_value_heap, (value, pos))
        else:
            heapq.heappush(max_value_heap, (value, len(best_values)))
            best_values.append(value)
            best_index.append(index)

    pos, best_values = zip(
        *sorted(enumerate(best_values), key=lambda e: e[1], reverse=True))
    best_index = [best_index[i] for i in pos]
    return best_index
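A quick illustration of the two helpers above (assuming both argmin_n and argmax_n are in scope): each returns the indices of the n smallest / largest entries, ordered by value.

import numpy as np

m = np.array([[5, 1, 9],
              [3, 8, 0]])
print(argmin_n(m, 2))  # [(1, 2), (0, 1)] -> values 0 and 1
print(argmax_n(m, 2))  # [(0, 2), (1, 1)] -> values 9 and 8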
Project: tncontract | Author: andrewdarmawan | project source | file source
def __init__(self, tensors, up_label="up", right_label="right",
                 down_label="down", left_label="left",
                 copy_data=True):
        self.up_label = up_label
        self.right_label = right_label
        self.down_label = down_label
        self.left_label = left_label

        if copy_data:
            # Creates copies of tensors in memory
            copied_tensors = []
            for row in tensors:
                copied_tensors.append([x.copy() for x in row])
            self.data = np.array(copied_tensors)
        else:
            # This will not create copies of tensors in memory
            # (just link to originals)
            self.data = np.array(tensors)

        # Every tensor will have four indices corresponding to
        # "left", "right" and "up", "down" labels.
        for i, x in np.ndenumerate(self.data):
            if left_label not in x.labels: x.add_dummy_index(left_label)
            if right_label not in x.labels: x.add_dummy_index(right_label)
            if up_label not in x.labels: x.add_dummy_index(up_label)
            if down_label not in x.labels: x.add_dummy_index(down_label)

    # Add container emulation
Project: dataArtist | Author: radjkarl | project source | file source
def getCellParameters(self, array, fn=np.mean):
        out = np.arange(len(self.cells),
                        dtype=float).reshape(self.opts['grid'])
        s = array.shape
        for (i, j), n in np.ndenumerate(out):
            m = self.cells[int(n)].getMask(s)
            out[i, j] = fn(array[m])
        return out
Project: ECoG-ClusterFlow | Author: sugeerth | project source | file source
def Find_HighlightedEdges(self,weight = 0):
        self.ThresholdData = np.copy(self.data)
        # low_values_indices = self.ThresholdData < weight  # Where values are low
        # self.ThresholdData[low_values_indices] = 0
    # graterindices = [ (i,j) for i,j in np.ndenumerate(self.ThresholdData) if any(i > j) ] 
        # self.ThresholdData[graterindices[:1]] = 0
        # self.ThresholdData = np.tril(self.ThresholdData)
        # print self.ThresholdData, "is the data same??" 
        """
        test 2 highlighted edges there
        """
        # np.savetxt('test2.txt', self.ThresholdData, delimiter=',', fmt='%1.4e')
        self.g = nx.from_numpy_matrix(self.ThresholdData)
Project: gps | Author: cbfinn | project source | file source
def finite_differences(func, inputs, func_output_shape=(), epsilon=1e-5):
    """
    Computes gradients via finite differences.
    derivative = (func(x+epsilon) - func(x-epsilon)) / (2*epsilon)
    Args:
        func: Function to compute gradient of. Inputs and outputs can be
            arbitrary dimension.
        inputs: Vector value to compute gradient at.
        func_output_shape: Shape of the output of func. Default is
            empty-tuple, which works for scalar-valued functions.
        epsilon: Difference to use for computing gradient.
    Returns:
        Gradient vector of each dimension of func with respect to each
        dimension of input.
    """
    gradient = np.zeros(inputs.shape+func_output_shape)
    for idx, _ in np.ndenumerate(inputs):
        test_input = np.copy(inputs)
        test_input[idx] += epsilon
        obj_d1 = func(test_input)
        assert obj_d1.shape == func_output_shape
        test_input = np.copy(inputs)
        test_input[idx] -= epsilon
        obj_d2 = func(test_input)
        assert obj_d2.shape == func_output_shape
        diff = (obj_d1 - obj_d2) / (2 * epsilon)
        gradient[idx] += diff
    return gradient
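A small sanity check for finite_differences above (assuming it is in scope): compare the numerical gradient of f(x) = sum(x**2) against the analytic gradient 2*x.

import numpy as np

x0 = np.array([1.0, -2.0, 0.5])
num_grad = finite_differences(lambda x: np.sum(x ** 2), x0)
print(num_grad)                                  # approximately [ 2. -4.  1.]
print(np.allclose(num_grad, 2 * x0, atol=1e-4))  # True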
Project: learning-tensorflow | Author: Salon-sai | project source | file source
def __init__(self, row, column):
        self.rewards = np.full((row, column), -0.2)
        self.states = np.ones((row, column), dtype=np.int)
        self.states[1, 1] = -1

        self.index_list = [index for index, x in np.ndenumerate(self.states) if x > 0]
        self._init_next_state_table()

        self.rewards[0, column - 1] = 1
        self.rewards[0, 0] = 1
        self.rewards[1, column - 1] = -1

        self.terminal = [(0, column - 1), (0, 0)]