Python builtins module: zip() example source code

We have extracted the following 49 code examples from open-source Python projects to illustrate how to use builtins.zip().

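Before the project examples, here is a minimal sketch of what builtins.zip() does (assuming Python 3, where builtins is a standard-library module; many of the projects below import zip from the future package's builtins backport so the same code also runs on Python 2). zip() pairs elements from several iterables positionally and stops at the shortest one; the values below are purely illustrative.

from builtins import zip  # built-in zip on Python 3; needs the `future` package on Python 2

names = ['RA', 'dec']
values = [1.57, -0.5]

# pair elements positionally; iteration stops at the shortest iterable
pairs = dict(zip(names, values))
print(pairs)            # {'RA': 1.57, 'dec': -0.5}

# "unzip" with the * operator
keys, vals = zip(*pairs.items())
print(keys, vals)       # ('RA', 'dec') (1.57, -0.5)
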
Project: BISIP    Author: clberube
def print_resul(sol):
#==============================================================================
    # Print the results
    pm, model, filename = sol.pm, sol.model, sol.filename
    print('\n\nInversion success!')
    print('Name of file:', filename)
    print('Model used:', model)
    try:
        pm.pop("cond_std")
        pm.pop("tau_i_std")
        pm.pop("m_i_std")
    except:
        pass
    e_keys = sorted([s for s in list(pm.keys()) if "_std" in s])

    v_keys = [e.replace("_std", "") for e in e_keys]
    labels = ["{:<8}".format(x+":") for x in v_keys]
    np.set_printoptions(formatter={'float': lambda x: format(x, '6.3E')})
    for l, v, e in zip(labels, v_keys, e_keys):
        if "noise" not in l:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]), np.char.mod('(%.2f%%)',abs(100*pm[e]/pm[v])))
        else:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]))
Project: hsmm4acc    Author: wadpac
def plot_perstate(data, hidden_states):
    '''
    Make, for each state, a plot of the data

    Parameters
    ----------
    data : pandas DataFrame
        Data to plot
    hidden_states : iterable
        the hidden states corresponding to the timesteps
    '''
    num_states = max(hidden_states) + 1
    fig, axs = plt.subplots(
        num_states, sharex=True, sharey=True, figsize=(15, 15))
    colours = plt.cm.rainbow(np.linspace(0, 1, num_states))
    for i, (ax, colour) in enumerate(zip(axs, colours)):
        # Use fancy indexing to plot data in each state.
        data_to_plot = data.copy()
        data_to_plot[hidden_states != i] = 0
        data_to_plot.plot(ax=ax, legend=False)
        ax.set_title("{0}th hidden state".format(i))
        ax.grid(True)
    plt.legend(bbox_to_anchor=(0, -1, 1, 1), loc='lower center')
    plt.show()
Project: kiwi    Author: papaya-mobile
def batch_get(self, keys):
        schema_len = len(self.schema)
        schema_names = [k.name for k in self.schema]
        dictkeys = []
        for key in keys:
            if not isinstance(key, (tuple, list)):
                key = [key]
            if schema_len != len(key):
                raise ArgumentError("key `%s` can not match "
                                    "the table's schema" % str(key))
            dictkeys.append(dict(zip(schema_names, key)))

        if not dictkeys:
            return []

        results = self.table.batch_get(dictkeys)
        return self.wrap_result(results)
Project: ngraph    Author: NervanaSystems
def is_valid_flatten_or_unflatten(src_axes, dst_axes):
        """
        Checks whether we can flatten OR unflatten from src_axes to dst_axes.

        The requirements are that the components of axes should all be
        present in new_axes and that they should be laid out in the same
        order. This check is symmetric.
        """

        # inflate
        src_axes = Axes.as_flattened_list(src_axes)
        dst_axes = Axes.as_flattened_list(dst_axes)

        # check equal number of Axis
        if len(src_axes) != len(dst_axes):
            return False

        # check all Axis are equal
        equal = [src == dst for src, dst in zip(src_axes, dst_axes)]
        return all(equal)
Project: ngraph    Author: NervanaSystems
def _make_strides(inner_size, axes, full_sizes):
    """
    Generates a tuple of strides for a set of axes. See _make_stride
    for a description of the stride given to each axis.

    Arguments:
        inner_size: The total size of all dimensions smaller than
        the axes.
        axes: The axes for which we are generating strides.
        full_sizes: The size of each axis.

    Returns:
        inner_size: The total size of these axes and all smaller dimensions.
        strides: The strides generated for the axes.
    """
    full_strides = []
    for axis, fsz in reversed(list(zip(axes, full_sizes))):
        inner_size, stride = _make_stride(inner_size, axis, fsz)
        full_strides.append(stride)
    return inner_size, tuple(reversed(full_strides))
Project: notebook-molecular-visualization    Author: Autodesk
def grid_map(f,v,dims,grids):
    """
    Map function values along a grid
    :param f: function to be evaluated, call signature f(v)
    :param v: vector that sets the static coordinates
    :param dims: ndims-length list of dimensions to vary
    :param grids: ndims-length list of grid values for each dimension
    :return: function value grid
    """
    vmod = deepcopy(v)
    for idx, vals in enumerate(zip(*[g.flat for g in grids])):
        for idim, val in zip(dims, vals): vmod[idim] = val
        if idx == 0:
            firstf = f(vmod)
            gridZ = np.zeros(grids[0].shape) * firstf
            gridZ.flat[0] = firstf
        else:
            gridZ.flat[idx] = f(vmod)
    return gridZ
Project: notebook-molecular-visualization    Author: Autodesk
def function_slice(f,v,dims,ranges):
    """
    Return an arbitrary dimensional slice of function values
    :param f: function to be evaluated, call signature f(v)
    :param v: vector that sets the static coordinates
    :param dims: ndims-length list of dimensions to vary
    :param ranges: ndims-list of values along those dimensions
    :return: gridpoints, function values
    """

    assert len(dims)==len(ranges)
    if len(ranges)>1:
        grids = np.meshgrid(*ranges)
    else:
        grids=list(ranges)

    for igrid,(r,g) in enumerate(zip(ranges,grids)):
        grids[igrid] = units_transfer(r,g)

    gridZ = grid_map(f,v,dims,grids)
    return grids,gridZ
Project: cuvarbase    Author: johnh2o2
def run(self, data, gpu_data=None, pow_cpus=None,
            kind='binned_linterp', nbins=10, **pdm_kwargs):
        function = 'pdm_%s_%dbins' % (kind, nbins)

        if function not in self.prepared_functions:
            self._compile_and_prepare_functions(nbins=nbins)

        if pow_cpus is None or gpu_data is None:
            gpu_data, pow_cpus = self.allocate(data)
        streams = [s for i, s in enumerate(self.streams) if i < len(data)]
        func = self.prepared_functions[function]
        results = [pdm_async(stream, cdat, gdat, pcpu, func, **pdm_kwargs)
                   for stream, cdat, gdat, pcpu in
                   zip(streams, data, gpu_data, pow_cpus)]

        return results
Project: cuvarbase    Author: johnh2o2
def gpu_grid_scalar(t, y, sigma, m, N):
    b = get_b(sigma, m)

    n = int(sigma * N)

    q1, q2, q3 = precomp_psi(t, b, n, m)

    u = (np.floor(n * (t + 0.5) - m)).astype(np.int)

    grid = np.zeros(n)

    inds = np.arange(2 * m + 1)
    for i, (U, Y) in enumerate(zip(u, y)):
        q2vals = np.array([pow(q2[i], j) for j in inds])
        grid[(U + inds) % len(grid)] += Y * q1[i] * q2vals * q3

    return grid
Project: cuvarbase    Author: johnh2o2
def test_multiple_datasets(self, ndatas=5):
        datas = [data() for i in range(ndatas)]
        ls_proc = LombScargleAsyncProcess(sigma=nfft_sigma)

        mult_results = ls_proc.run(datas, nyquist_factor=nfac,
                                   samples_per_peak=spp)
        ls_proc.finish()

        sing_results = []

        for d in datas:
            sing_results.extend(ls_proc.run([d], nyquist_factor=nfac,
                                samples_per_peak=spp))
            ls_proc.finish()

        for rb, rnb in zip(mult_results, sing_results):
            fb, pb = rb
            fnb, pnb = rnb

            assert_allclose(pnb, pb, rtol=lsrtol, atol=lsatol)
            assert_allclose(fnb, fb, rtol=lsrtol, atol=lsatol)
Project: incubator-airflow-old    Author: apache
def _match_headers(self, header_list):
        if not header_list:
            raise AirflowException("Unable to retrieve header row from file")
        field_names = self.field_dict.keys()
        if len(field_names) != len(header_list):
            self.log.warning("Headers count mismatch"
                              "File headers:\n {header_list}\n"
                              "Field names: \n {field_names}\n"
                              "".format(**locals()))
            return False
        test_field_match = [h1.lower() == h2.lower()
                            for h1, h2 in zip(header_list, field_names)]
        if not all(test_field_match):
            self.log.warning("Headers do not match field names"
                              "File headers:\n {header_list}\n"
                              "Field names: \n {field_names}\n"
                              "".format(**locals()))
            return False
        else:
            return True
Project: fypp    Author: aradi
def _get_conditional_content(self, fname, spans, conditions, contents):
        out = []
        ieval = []
        peval = []
        multiline = (spans[0][0] != spans[-1][1])
        for condition, content, span in zip(conditions, contents, spans):
            try:
                cond = bool(self._evaluate(condition, fname, span[0]))
            except Exception as exc:
                msg = "exception occured when evaluating '{0}'"\
                      .format(condition)
                raise FyppFatalError(msg, fname, span, exc)
            if cond:
                if self._linenums and not self._diverted and multiline:
                    out.append(linenumdir(span[1], fname))
                outcont, ievalcont, pevalcont = self._render(content)
                ieval += _shiftinds(ievalcont, len(out))
                peval += pevalcont
                out += outcont
                break
        if self._linenums and not self._diverted and multiline:
            out.append(linenumdir(spans[-1][1], fname))
        return out, ieval, peval
Project: dataArtist    Author: radjkarl
def setModify(self, modify):
        self.isModify = modify

        if modify:
            self.hide()
            self.pathEdit = pg.PolyLineROI(self.elements(),
                                           closed=True, pen=self.pen, movable=False)
            self.getViewBox().addItem(self.pathEdit)

        elif self.pathEdit is not None:
            #x0,y0 = self.pathEdit.pos()
            for i, h in zip(list(range(self.path.elementCount() - 1)),
                            self.pathEdit.getHandles()):
                e = self.path.elementAt(i)
                x, y = h.pos()

                self.path.setElementPositionAt(i, x, y)
            # last element = first element:
            self.path.setElementPositionAt(
                i + 1, *self.pathEdit.getHandles()[0].pos())
            self.getViewBox().removeItem(self.pathEdit)
            self.pathEdit = None
            self.show()
Project: dataArtist    Author: radjkarl
def deactivate(self):
        w = self.display.widget
        vb = w.view.vb

        w.setCurrentIndex(0)
        w.display.widget.showTimeline(True)

        hist = w.ui.histogram
        for item, (fn1, fn2) in zip(w.subitems, self._fns):
            hist.sigLookupTableChanged.disconnect(fn1)
            hist.sigLevelsChanged.disconnect(fn2)

            vb.removeItem(item)

        w.subitems = []
        self._fns = []

        ar = vb.state['autoRange']
        vb.state['autoRange'] = [True, True]
        vb.updateAutoRange()
        vb.state['autoRange'] = ar
Project: ldpop    Author: popgenmethods
def ordered_log_likelihoods(self, liks):
        try:
            return {time : self.ordered_log_likelihoods(l) for time,l in liks.items()}
        except AttributeError:
            liks = liks * self.antisymmetries

            all_nC = self.config_array[:,:-1,:-1].sum(axis=(1,2))
            liks = liks[all_nC == self.n]

            full_confs = self.config_array[:,:-1,:-1][all_nC == self.n, :, :]

            liks = numpy.log(liks)
            liks -= scipy.special.gammaln(self.n+1)
            for i in (0,1):
                for j in (0,1):
                    liks += scipy.special.gammaln(full_confs[:,i,j]+1)

            full_confs = [tuple(sorted(((i,j),cnf[i,j]) for i in (0,1) for j in (0,1))) for cnf in full_confs]
            return dict(zip(full_confs, liks))
Project: ldpop    Author: popgenmethods
def computeLikelihoods(n, exact, popSizes, theta, timeLens, rhoGrid, cores):
    rhoGrid = list(rhoGrid)
    assert rhoGrid == sorted(rhoGrid)

    # make the pool first to avoid copying large objects. maxtasksperchild=1 to avoid memory issues
    executor = Pool(cores, maxtasksperchild=1)

    # make the states and the rates
    states = get_states(n, exact)
    moranRates = MoranRates(states)

    # compute initial distributions and likelihoods
    prevInit = states.getUnlinkedStationary(popSize=popSizes[-1], theta=theta)
    inits = []
    #for rho, rates in reversed(zip(rhoGrid, lastRatesList)):
    for rho in reversed(rhoGrid):
        rates = moranRates.getRates(rho=rho, popSize=popSizes[-1], theta=theta)
        prevInit = stationary(Q=rates, init=prevInit, norm_order=float('inf'), epsilon=1e-2)
        inits.append(prevInit)
    ret = executor.map(getColumnHelper, [(moranRates, rho, theta, popSizes, timeLens, prevInit) for rho,prevInit in zip(reversed(rhoGrid),inits)])
    logging.info("Cleaning up results...")
    ret = [states.ordered_log_likelihoods(result) for result in ret]
    executor.close()

    return [(rho, lik) for rho,lik in zip(rhoGrid, reversed(ret))]
Project: mriqc    Author: poldracklab
def __iter__(self):
        """Iterate over the points in the grid.
        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = list(p.items())
            if not items:
                yield {}
            else:
                for estimator, grid_list in items:
                    for grid in grid_list:
                        grid_points = sorted(list(grid.items()))
                        keys, values = zip(*grid_points)
                        for v in product(*values):
                            params = dict(zip(keys, v))
                            yield (estimator, params)
Project: FightstickDisplay    Author: calexil
def get_fragmented_free_size(self):
        '''Returns the amount of space unused, not including the final
        free block.

        :rtype: int
        '''
        if not self.starts:
            return 0

        # Variation of search for free block.
        total_free = 0
        free_start = self.starts[0] + self.sizes[0]
        for i, (alloc_start, alloc_size) in \
                enumerate(zip(self.starts[1:], self.sizes[1:])):
            total_free += alloc_start - free_start
            free_start = alloc_start + alloc_size

        return total_free
Project: FightstickDisplay    Author: calexil
def _dump_draw_list(self):
        def dump(group, indent=''):
            print(indent, 'Begin group', group)
            domain_map = self.group_map[group]
            for _, domain in domain_map.items():
                print(indent, '  ', domain)
                for start, size in zip(*domain.allocator.get_allocated_regions()):
                    print(indent, '    ', 'Region %d size %d:' % (start, size))
                    for key, attribute in domain.attribute_names.items():
                        print(indent, '      ', end=' ')
                        try:
                            region = attribute.get_region(attribute.buffer,
                                                          start, size)
                            print(key, region.array[:])
                        except:
                            print(key, '(unmappable)')
            for child in self.group_children.get(group, ()):
                dump(child, indent + '  ')
            print(indent, 'End group', group)

        print('Draw list for %r:' % self)
        for group in self.top_groups:
            dump(group)
Project: LipidFinder    Author: cjbrasher
def categoryRename(mergeDF):
    """Lipid categories are renamed to the standard lipid category names
    as per LIPIDMAPS. The categories_map.csv is used to 'map' old category
    names to new category names

    Args:
        mergeDF (dataframe): input dataframe

    Returns:
        dataframe: output dataframe
    """
    categoryFileDF = pd.read_table(
        "categories_map.csv", sep=',', keep_default_na=False)

    # new way: make the categories df into a dictionary - much faster!!
    catMap = dict(list(zip(categoryFileDF.old_category,
                           categoryFileDF.new_category)))
    mergeDF['CATEGORY'] = mergeDF['CATEGORY'].map(catMap)

    return mergeDF
Project: cryptogram    Author: xinmingzhang
def get_fragmented_free_size(self):
        '''Returns the amount of space unused, not including the final
        free block.

        :rtype: int
        '''
        if not self.starts:
            return 0

        # Variation of search for free block.
        total_free = 0
        free_start = self.starts[0] + self.sizes[0]
        for i, (alloc_start, alloc_size) in \
                enumerate(zip(self.starts[1:], self.sizes[1:])):
            total_free += alloc_start - free_start
            free_start = alloc_start + alloc_size

        return total_free
Project: cryptogram    Author: xinmingzhang
def _dump_draw_list(self):
        def dump(group, indent=''):
            print(indent, 'Begin group', group)
            domain_map = self.group_map[group]
            for _, domain in domain_map.items():
                print(indent, '  ', domain)
                for start, size in zip(*domain.allocator.get_allocated_regions()):
                    print(indent, '    ', 'Region %d size %d:' % (start, size))
                    for key, attribute in domain.attribute_names.items():
                        print(indent, '      ', end=' ')
                        try:
                            region = attribute.get_region(attribute.buffer,
                                                          start, size)
                            print(key, region.array[:])
                        except:
                            print(key, '(unmappable)')
            for child in self.group_children.get(group, ()):
                dump(child, indent + '  ')
            print(indent, 'End group', group)

        print('Draw list for %r:' % self)
        for group in self.top_groups:
            dump(group)
Project: formic    Author: scottbelden
def test_rooted(self):
        curdir = os.getcwd()
        full = os.path.dirname(os.path.dirname(__file__))
        drive, dir = os.path.splitdrive(full)
        wild = "**" + os.path.sep + "*.rst"
        os.chdir(full)
        try:
            fileset = FileSet(include=wild, directory=full)
            for filename in fileset.qualified_files():
                print(filename)
            absolute = [filename for filename in FileSet(include=wild, directory=full)]
            relative = [filename for filename in FileSet(include=wild)]
            rooted = [filename for filename in FileSet(include=os.path.join(dir, wild), directory=drive + os.path.sep)]
            assert len(relative) == len(absolute) == len(rooted)
            combined = zip(rooted, relative, absolute)
            for root, rel, abso in combined:
                print(root, "<->", rel, "<->", abso)
                assert root.endswith(rel)
                assert abso.endswith(rel)
        finally:
            os.chdir(curdir)
Project: sims_featureScheduler    Author: lsst
def read_fields():
    """
    Read in the old Field coordinates
    Returns
    -------
    numpy.array
        With RA and dec in radians.
    """
    names = ['RA', 'dec']
    types = [float, float]
    data_dir = os.path.join(getPackageDir('sims_featureScheduler'), 'python/lsst/sims/featureScheduler/')
    filepath = os.path.join(data_dir, 'fieldID.lis')
    fields = np.loadtxt(filepath, dtype=list(zip(names, types)))
    fields['RA'] = np.radians(fields['RA'])
    fields['dec'] = np.radians(fields['dec'])
    return fields
Project: sims_featureScheduler    Author: lsst
def hp_kd_tree(nside=set_default_nside(), leafsize=100):
    """
    Generate a KD-tree of healpixel locations

    Parameters
    ----------
    nside : int
        A valid healpix nside
    leafsize : int (100)
        Leafsize of the kdtree

    Returns
    -------
    tree : scipy kdtree
    """
    hpid = np.arange(hp.nside2npix(nside))
    ra, dec = _hpid2RaDec(nside, hpid)
    x, y, z = treexyz(ra, dec)
    tree = kdtree(list(zip(x, y, z)), leafsize=leafsize, balanced_tree=False, compact_nodes=False)
    return tree
Project: rnn-role    Author: mtanti
def parse(data):
            indexes = list()
            lens = list()
            images = list()
            for (caption_group, img) in zip(data['captions'], data['images']):
                for caption in caption_group:
                    indexes_ = [ token_to_index.get(token, unknown_index) for token in caption ]
                    indexes.append(indexes_)
                    lens.append(len(indexes_)+1) #add 1 due to edge token
                    images.append(img)

            maxlen = max(lens)

            in_mat  = np.zeros((len(indexes), maxlen), np.int32)
            out_mat = np.zeros((len(indexes), maxlen), np.int32)
            for (row, indexes_) in enumerate(indexes):
                in_mat [row,:len(indexes_)+1] = [edge_index]+indexes_
                out_mat[row,:len(indexes_)+1] = indexes_+[edge_index]
            return (in_mat, out_mat, np.array(lens, np.int32), np.array(images))
Project: stream2segment    Author: rizac
def tst_perf():
    N = 1000
    lat1 = np.random.randint(0, 90, N).astype(float)
    lon1 = np.random.randint(0, 90, N).astype(float)
    lat2 = np.random.randint(0, 90, N).astype(float)
    lon2 = np.random.randint(0, 90, N).astype(float)

    s = time.time()
    s2sloc2deg(lat1, lon1, lat2, lon2)
    end = time.time() - s

    s2 = time.time()
    for l1, l2, l3, l4 in zip(lat1, lon1, lat2, lon2):
        s2sloc2deg(l1, l2, l3, l4)
    end2 = time.time() - s2

    print("%d loops. Numpy loc2deg: %f, obspy loc2deg: %f" % (N, end, end2))
Project: sparser    Author: sparser
def parse(self, string, do_error=True):
        """
        :param str string: the string captured within the dict
        :rtype: {var_name: var_val}
        """
        match = re.match(self.translated_patt, string, re.DOTALL)
        if not match:
            if not do_error:
                return None
            # separate by loops so that we can point closer to the place that doesn't work
            for section in self.translated_patt.split('(?P<.*?>.*?)'):
                if not re.search(section, string, re.DOTALL):
                    raise SparserValueError("%r is unmatched for string %r" % (section, string))
            raise SparserValueError("%r is unmatched" % string)

        ret = {}
        for sub_match, d_entry in zip(match.groups(), self.d_entries):
            try:
                ret[d_entry.name] = d_entry.cb(sub_match)
            except TypeError:
                ret[d_entry.name] = d_entry.cb(unicode(sub_match))

        return ret
Project: reframe    Author: eth-cscs
def test_findall(self):
        res = evaluate(findall('Step: \d+', self.tempfile))
        self.assertEqual(3, builtins.len(res))

        res = evaluate(findall('Step:.*', self.tempfile))
        self.assertEqual(3, builtins.len(res))

        res = evaluate(findall('Step: [12]', self.tempfile))
        self.assertEqual(2, builtins.len(res))

        # Check the matches
        for expected, match in builtins.zip(['Step: 1', 'Step: 2'], res):
            self.assertEqual(expected, match.group(0))

        # Check groups
        res = evaluate(findall('Step: (?P<no>\d+)', self.tempfile))
        for step, match in builtins.enumerate(res, start=1):
            self.assertEqual(step, builtins.int(match.group(1)))
            self.assertEqual(step, builtins.int(match.group('no')))
Project: hsmm4acc    Author: wadpac
def take_subsequences(dfs):
    """
    Make subsequences of the data that are completely valid.

    Parameters
    ----------
    dfs : dict
        dict holding all the merged dataframes (result from process_data)

    Returns
    -------
    dict holding all the subsequences

    """
    subsets = {}
    for key in list(dfs.keys()):
        dataset = dfs[key]
        invalids = [1] + list(dataset['invalid']) + [1]
        starts = [i for i in range(1, len(invalids) - 1) if invalids[i - 1] == 1 and invalids[i] == 0]
        ends = [i for i in range(1, len(invalids)) if invalids[i - 1] == 0 and invalids[i] == 1]
        dataset['subset'] = -1
        for i, (s, e) in enumerate(zip(starts, ends)):
            # Some minimum length
            if e - s > 300:
                dataset.loc[s - 1:e - 1, 'subset'] = i
                subsets[(key, i)] = (dataset[s - 1:e - 1].copy())
    return subsets
Project: hsmm4acc    Author: wadpac
def iterate_hsmm_batch(X_list, model, current_states, trunc,
                       example_index=None, axis=None):
    # First time, the states need to be initialized:
    """

    Parameters
    ----------
    X_list : list of Numpy arrays
        The sequences of shape (num_timesteps, num_channels)
    model : pyhsmm model
        The HSMM model
    current_states : list of arrays
        The resulting statesequences of previous iteration
    trunc : int, optional
        Maximum duration of a state, for optimization
    example_index : int, optional
        Which of the sequences to use as an example for plotting
    axis : pyplot Axis
        axis to plot the example sequence

    Returns
    -------

    """
    if current_states is None:
        current_states = [np.zeros((X.shape[0])) for X in X_list]
        for X in X_list:
            model.add_data(X, trunc=trunc)
    else:
        for i, X in enumerate(X_list):
            model.add_data(X, stateseq=current_states[i], trunc=trunc)
    model.resample_model()
    newstates = model.stateseqs
    hamdis = [np.mean(a != b) for a, b in zip(current_states, newstates)]

    # Visualize
    if example_index is not None:
        model.plot_stateseq(example_index, ax=axis, draw=False)

    model.states_list = []
    return model, hamdis, newstates
Project: NuGridPy    Author: NuGrid
def _profiles_index(self):
        """
        read profiles.index and make hash array

        Notes
        -----
        sets the attributes.

        log_ind : hash array that returns profile.data or log.data
        file number from model number.

        model : the models for which profile.data or log.data is
        available

        """

        prof_ind_name = self.prof_ind_name

        f = open(self.sldir+'/'+prof_ind_name,'r')
        line = f.readline()
        numlines=int(line.split()[0])
        print(str(numlines)+' in profiles.index file ...')

        model=[]
        log_file_num=[]
        for line in f:
            model.append(int(line.split()[0]))
            log_file_num.append(int(line.split()[2]))

        log_ind={}    # profile.data number from model
        for a,b in zip(model,log_file_num):
            log_ind[a] = b

        self.log_ind=log_ind
        self.model=model

# let's start with functions that acquire data
Project: NuGridPy    Author: NuGrid
def __init__(self, fname=None, gdbdir=None, gdbload=True,
                 iniabufile='frames/mppnp/USEEPP/iniab2.0E-02GN93.ppn'):
        print('Reading in... this takes a little bit')

        if iniabufile[0] != '/':
            iniabufile = get_svnpath() + iniabufile

        # grab data
        header_desc, header_data, desc, data = preprocessor(fname,gdbdir,gdbload)
        # make dictionary
        descdict = dict(list(zip(header_desc,list(range(len(header_desc))))))
        datadict = dict(list(zip(header_data,list(range(len(header_data))))))

        # style definer
        header_style, style = style_creator(desc,descdict)
        styledict = dict(list(zip(header_style,list(range(len(header_style))))))

        # make private instances w/ all the data
        self._header_desc = header_desc
        self._header_data = header_data
        self._header_style = header_style
        self._desc = desc
        self._data = data
        self._style = style
        self._descdict = descdict
        self._datadict = datadict
        self._styledict = styledict
        # make the working data
        self.header_desc = header_desc
        self.header_data = header_data
        self.header_style = header_style
        self.desc = desc
        self.data = data
        self.style = style
        self.descdict = descdict
        self.datadict = datadict
        self.styledict = styledict
        self.inut = iniabu(iniabufile)
Project: NuGridPy    Author: NuGrid
def define_zip_index_for_species(names_ppn_world,
                                 number_names_ppn_world):
    '''This just gives back cl, i.e. the original index as it is read from a data file.'''

    # connect the species number in the list with the species name
    global cl
    cl={}
    for a,b in zip(names_ppn_world,number_names_ppn_world):
        cl[a] = b
Project: kiwi    Author: papaya-mobile
def test_batch_write_2(self, UserAction):
        UA = UserAction

        with UA.batch_write() as batch:
            pass

        with UA.batch_write() as batch:
            batch.add(UA(id=100, time=100, name='100'))
            batch.add(UA(id=101, time=101, name='101'))
            batch.add(UA(id=102, time=102, name='102'))
            batch.add(UA(id=103, time=103, name='103'))

            with pytest.raises(ArgumentError):
                batch.add(123444)

        keys = list(zip(list(range(100, 104)), list(range(100, 104))))
        assert set([(u.id, u.time) for u in UA.batch_get(keys)]) == set(keys)

        with UA.batch_write() as batch:
            batch.delete(UA(id=100, time=100))
            batch.delete({'id': 101, 'time': 101})

            with pytest.raises(ArgumentError):
                batch.delete(102)
            with pytest.raises(ArgumentError):
                batch.delete({'time': 103})

        assert set([(u.id, u.time) for u in UA.batch_get(keys)]
                   ) == set([(102, 102), (103, 103)])

        with UA.batch_write() as batch:
            batch.delete({'id': 102, 'time': 102})
            batch.add(UA(id=100, time=100, name='100'))
            batch.delete({'id': 103, 'time': 103})

        assert set([(u.id, u.time) for u in UA.batch_get(keys)]
                   ) == set([(100, 100)])

        UA.delete(id=100, time=100)
        assert set([(u.id, u.time) for u in UA.batch_get(keys)]) == set()
Project: kiwi    Author: papaya-mobile
def prepare(self):
        parts = []
        for key_cls, field in zip(
                (dynamo.HashKey, dynamo.RangeKey), self.parts):
            key = key_cls(field.name, data_type=field.data_type)
            parts.append(key)
        return dict(name=self.name, parts=parts)
Project: kiwi    Author: papaya-mobile
def get_item(self, *args):
        '''
        does not support `consistent` or `attributes` yet
        '''
        if len(self.schema) != len(args):
            raise ArgumentError("args can not match the table's schema")
        kwargs = dict()
        for key, value in zip(self.schema, args):
            kwargs[key.name] = value
        try:
            return self.table.get_item(**kwargs)
        except dynamo.ItemNotFound:  # ItemNotFound
            return None
Project: vaping    Author: 20c
def parse_line(line):
    if line.startswith('CNTR'):
        keys = (x[0] for x in _KEYDEF['CNTR'])
        typs = (x[1] for x in _KEYDEF['CNTR'])
        return {k: t(d) for (d, k, t) in zip(line.split(','), keys, typs)}
Project: ngraph    Author: NervanaSystems
def train_or_val_pairs(self, setn):
        """
        untar imagenet tar files into directories that indicate their label.

        returns [(filename, label), ...] for train or val set partitions
        """
        img_dir = os.path.join(self.out_dir, setn)

        neon_logger.display("Extracting %s files" % (setn))
        root_tf_path = self.tars[setn]
        if not os.path.exists(root_tf_path):
            raise IOError(("tar file {} not found. Ensure you have ImageNet downloaded"
                           ).format(root_tf_path))

        try:
            root_tf = tarfile.open(root_tf_path)
        except tarfile.ReadError as e:
            raise ValueError('ReadError opening {}: {}'.format(root_tf_path, e))

        label_dict = self.extract_labels(setn)
        subpaths = root_tf.getmembers()
        arg_iterator = zip(repeat(self.target_size), repeat(root_tf_path), repeat(img_dir),
                           repeat(setn), repeat(label_dict), subpaths)
        pool = multiprocessing.Pool()

        pairs = []
        for pair_list in tqdm.tqdm(pool.imap_unordered(process_i1k_tar_subpath, arg_iterator),
                                   total=len(subpaths)):
            pairs.extend(pair_list)
        pool.close()
        pool.join()
        root_tf.close()

        return pairs
Project: ngraph    Author: NervanaSystems
def set_shape(self, shape):
        """
        Set shape of Axes

        Args:
            shape: tuple or list of shapes, must be the same length as the axes
        """
        if len(shape) != len(self._axes):
            raise ValueError("shape's length %s must be equal to axes' length"
                             "%s" % (len(shape), len(self)))
        for axis, length in zip(self._axes, shape):
            axis.length = length
Project: ngraph    Author: NervanaSystems
def __eq__(self, other):
        return other.is_flattened\
            and all(l == r for l, r in zip(self.axes, other.axes))
Project: hakkuframework    Author: 4shadoww
def oldzip(*args, **kwargs):
        return list(builtins.zip(*args, **kwargs))
Project: relaax    Author: deeplearninc
def check_are_lists_equal(self, a, b):
        self.assertEquals(len(a), len(b))
        for aa, bb in zip(a, b):
            self.check_are_equal(aa, bb)
Project: dcinside    Author: carcdrcons
def parse_comments(html):
    d = pq(html)

    # comments
    css_selector = '.gallery_re_contents tr.reply_line'
    found = d(css_selector)

    name_areas = found('td.user.user_layer')
    reply_areas = found('td.reply')

    user_names = make_list(yield_user_name, name_areas)
    user_IDs = make_list(yield_user_id, name_areas)
    replies = make_list(yield_reply, reply_areas)
    raw_replies = make_list(yield_raw_reply, reply_areas)
    re_times = make_list(yield_reply_time, found)
    IPs = make_list(yield_ip, reply_areas)
    reply_nums = make_list(yield_reply_num, d)
    delete_values = make_list(yield_delete_value, d)

    orgin_nums = [i[-1] for i in delete_values]

    l = []
    for i in zip(user_names, user_IDs, replies,
                 raw_replies, re_times, IPs, reply_nums, orgin_nums):
        d = {}
        d['user_name'] = i[0]
        d['user_id'] = i[1]
        d['reply'] = i[2]
        d['raw_reply'] = i[3]
        d['re_time'] = i[4]
        d['IP'] = i[5]
        d['reply_num'] = i[6]
        d['orgin_nums'] = i[7]
        l.append(d)

    return l
Project: cuvarbase    Author: johnh2o2
def var_tophat(t, y, w, freq, dphi):
    var = 0.
    for i, (T, Y, W) in enumerate(zip(t, y, w)):
        mbar = 0.
        wtot = 0.
        for j, (T2, Y2, W2) in enumerate(zip(t, y, w)):
            dph = dphase(abs(T2 - T), freq)
            if dph < dphi:
                mbar += W2 * Y2
                wtot += W2

        var += W * (Y - mbar / wtot)**2

    return var
Project: cuvarbase    Author: johnh2o2
def binned_pdm_model(t, y, w, freq, nbins, linterp=True):

    if len(t) == 0:
        return lambda p, **kwargs: np.zeros_like(p)

    bin_means = np.zeros(nbins)
    phase = (t * freq) % 1.0
    bins = [int(p * nbins) % nbins for p in phase]

    for i in range(nbins):
        wtot = max([sum([W for j, W in enumerate(w) if bins[j] == i]), 1E-10])
        bin_means[i] = sum([W * Y for j, (Y, W) in enumerate(zip(y, w))
                            if bins[j] == i]) / wtot

    def pred_y(p, nbins=nbins, linterp=linterp, bin_means=bin_means):
        bs = np.array([int(P * nbins) % nbins for P in p])
        if not linterp:
            return bin_means[bs]
        alphas = p * nbins - np.floor(p * nbins) - 0.5
        di = np.floor(alphas).astype(np.int32)
        bins0 = bs + di
        bins1 = bins0 + 1

        alphas[alphas < 0] += 1
        bins0[bins0 < 0] += nbins
        bins1[bins1 >= nbins] -= nbins

        return (1 - alphas) * bin_means[bins0] + alphas * bin_means[bins1]

    return pred_y
Project: cuvarbase    Author: johnh2o2
def nfft_against_direct_sums(self, samples_per_peak=spp,
                                 f0=None, scaled=True):
        t, tsc, y, err = data(samples_per_peak=samples_per_peak)

        nf = int(nfft_sigma * len(t))

        df = 1./(samples_per_peak * (max(t) - min(t)))
        if f0 is None:
            f0 = -0.5 * nf * df
        k0 = int(f0 / df)

        f0 = k0 if scaled else k0 * df
        tg = tsc if scaled else t
        sppg = samples_per_peak

        gpu_nfft = simple_gpu_nfft(tg, y, nf, sigma=nfft_sigma, m=nfft_m,
                                   minimum_frequency=f0,
                                   samples_per_peak=sppg)

        freqs = (float(k0) + np.arange(nf))
        if not scaled:
            freqs *= df
        direct_dft = direct_sums(tg, y, freqs)

        tols = dict(rtol=nfft_rtol, atol=nfft_atol)

        def dsort(arr0, arr):
            d = np.absolute(arr0 - arr)
            return np.argsort(-d)

        inds = dsort(np.real(direct_dft), np.real(gpu_nfft))

        npr = 5
        q = list(zip(inds[:npr], direct_dft[inds[:npr]], gpu_nfft[inds[:npr]]))
        for i, dft, gnfft in q:
            print(i, dft, gnfft)
        assert_allclose(np.real(direct_dft), np.real(gpu_nfft), **tols)
        assert_allclose(np.imag(direct_dft), np.imag(gpu_nfft), **tols)
Project: cuvarbase    Author: johnh2o2
def test_nfft_adjoint_async(self, f0=0., ndata=10,
                                batch_size=3, use_double=False):
        datas = []
        for i in range(ndata):
            t, tsc, y, err = data()
            nf = int(nfft_sigma * len(t))

            datas.append((t, y, nf))

        kwargs = dict(minimum_frequency=f0, samples_per_peak=spp)

        proc = NFFTAsyncProcess(sigma=nfft_sigma, m=nfft_m, autoset_m=False,
                                use_double=use_double)

        single_nffts = []
        for t, y, nf in datas:
            nfft = simple_gpu_nfft(t, y, nf, sigma=nfft_sigma, m=nfft_m,
                                   use_double=use_double, **kwargs)
            single_nffts.append(nfft)

        multi_nffts = proc.run(datas, **kwargs)

        batch_nffts = proc.batched_run(datas, batch_size=batch_size, **kwargs)
        proc.finish()

        tols = dict(rtol=nfft_rtol, atol=nfft_atol)
        for ghat_m, ghat_s, ghat_b in zip(multi_nffts, single_nffts,
                                          batch_nffts):
            assert_allclose(ghat_s.real, ghat_m.real, **tols)
            assert_allclose(ghat_s.imag, ghat_m.imag, **tols)

            assert_allclose(ghat_s.real, ghat_b.real, **tols)
            assert_allclose(ghat_s.imag, ghat_b.imag, **tols)
Project: cuvarbase    Author: johnh2o2
def test_multiple_datasets(self, ndatas, **kwargs):
        datas = [data() for i in range(ndatas)]
        proc = ConditionalEntropyAsyncProcess(**kwargs)

        df = 0.02
        max_freq = 1.1
        min_freq = 0.9
        nf = int((max_freq - min_freq) / df)
        freqs = min_freq + df * np.arange(nf)

        mult_results = proc.run(datas, freqs=freqs)
        proc.finish()

        sing_results = []

        for d in datas:
            sing_results.extend(proc.run([d], freqs=freqs))
            proc.finish()

        for rb, rnb in zip(mult_results, sing_results):
            fb, pb = rb
            fnb, pnb = rnb

            assert(not any(np.isnan(pb)))
            assert(not any(np.isnan(pnb)))

            assert_allclose(pnb, pb, rtol=lsrtol, atol=lsatol)
            assert_allclose(fnb, fb, rtol=lsrtol, atol=lsatol)