Python builtins module: range() example source code

From open-source Python projects, we have extracted the following 50 code examples to illustrate how to use builtins.range().
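
A quick orientation first: on Python 3, builtins.range is simply the built-in lazy range object, while on Python 2 the same import is provided by the future compatibility package, which is why many of the projects below use it for 2/3-compatible iteration. A minimal sketch:

# On Python 2 this import comes from the `future` package and provides
# Python 3 semantics (a lazy range object rather than a list);
# on Python 3, `builtins` is part of the standard library.
from builtins import range

squares = [i * i for i in range(5)]  # [0, 1, 4, 9, 16]
print(squares)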

Project: live-plotter    Author: anandtrex    | Project source | File source
def mandelbrot(h, w, maxit):
    """
    Returns an image of the Mandelbrot fractal of size (h,w).
    """
    y, x = np.ogrid[-1.4:1.4:h * 1j, -2:0.8:w * 1j]
    c = x + y * 1j
    z = c
    divtime = maxit + np.zeros(z.shape, dtype=int)

    for i in range(maxit):
        z = z ** 2 + c
        diverge = z * np.conj(z) > 2 ** 2
        div_now = diverge & (divtime == maxit)
        divtime[div_now] = i + 100
        z[diverge] = 2
        logger.debug("Updating divtime")
        recorder.record('divtime', divtime)

    return divtime
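
The snippet refers to module-level numpy (np), logger, and recorder objects that live-plotter's example wires up elsewhere. A minimal self-contained driver, with those dependencies stubbed out (the stub recorder below is an assumption for illustration, not live-plotter's actual API), might look like:

import logging

import numpy as np

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class NullRecorder(object):
    """Stand-in for live-plotter's recorder; silently drops recorded values."""
    def record(self, name, value):
        pass

recorder = NullRecorder()

divtime = mandelbrot(h=200, w=300, maxit=20)
print(divtime.shape)  # (200, 300)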
Project: conec    Author: cod3licious    | Project source | File source
def _make_table(self, table_size=100000000., power=0.75):
        """
        Create a table using stored vocabulary word counts for drawing random words in the negative
        sampling training routines.
        Called internally from `build_vocab()`.
        """
        vocab_size = len(self.vocab)
        logger.info("constructing a table with noise distribution from %i words" % vocab_size)
        # table (= list of words) of noise distribution for negative sampling
        self.table = np.zeros(int(table_size), dtype=int)
        # compute sum of all power (Z in paper)
        train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
        # go through the whole table and fill it up with the word indexes proportional to a word's count**power
        widx = 0
        # normalize count^0.75 by Z
        d1 = self.vocab[self.index2word[widx]].count**power / train_words_pow
        for tidx in range(int(table_size)):
            self.table[tidx] = widx
            if tidx / table_size > d1:
                widx += 1
                d1 += self.vocab[self.index2word[widx]].count**power / train_words_pow
            if widx >= vocab_size:
                widx = vocab_size - 1
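
Stripped of the word2vec class context, the same count**0.75 table-filling idea can be shown standalone (the counts dict below is a made-up example):

import numpy as np

counts = {"the": 100, "cat": 10, "sat": 5}  # made-up word counts
power = 0.75
table_size = 1000

words = list(counts)
z = sum(c ** power for c in counts.values())  # normalizer (Z in the paper)

table = np.zeros(table_size, dtype=int)
widx = 0
cum = counts[words[widx]] ** power / z  # running cumulative probability
for tidx in range(table_size):
    table[tidx] = widx
    if tidx / float(table_size) > cum and widx < len(words) - 1:
        widx += 1
        cum += counts[words[widx]] ** power / z

# Uniform draws into `table` now sample words proportionally to count**0.75.
print(np.bincount(table, minlength=len(words)) / float(table_size))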
Project: BISIP    Author: clberube    | Project source | File source
def logp_trace(model):
    """
    Return a trace of logp for the model.
    """
    #init
    db = model.db
    n_samples = db.trace('deviance').length()
    logp = np.empty(n_samples, np.double)
    #loop over all samples
    for i_sample in range(n_samples):
        #set the value of all stochastic to their 'i_sample' value
        for stochastic in model.stochastics:
            try:
                value = db.trace(stochastic.__name__)[i_sample]
                stochastic.value = value

            except KeyError:
                print("No trace available for %s. " % stochastic.__name__)

        #get logp
        logp[i_sample] = model.logp
    return logp
Project: BISIP    Author: clberube    | Project source | File source
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: BISIP    Author: clberube    | Project source | File source
def logp_trace(model):
    """
    Return a trace of logp for the model.
    """
    #init
    db = model.db
    n_samples = db.trace('deviance').length()
    logp = np.empty(n_samples, np.double)
    #loop over all samples
    for i_sample in range(n_samples):
        #set the value of all stochastic to their 'i_sample' value
        for stochastic in model.stochastics:
            try:
                value = db.trace(stochastic.__name__)[i_sample]
                stochastic.value = value

            except KeyError:
                print("No trace available for %s. " % stochastic.__name__)

        #get logp
        logp[i_sample] = model.logp
    return logp
Project: BISIP    Author: clberube    | Project source | File source
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: meme_get    Author: memegen    | Project source | File source
def _memes_on_page(self, page_num, n):
        """ Get num memes on page

        memegenerator.net has a convenient api that allows us to get memes
        in JSON format.
        API Documentation: http://version1.api.memegenerator.net/
        """

        if n > self._posts_per_page:
            return None

        # Use the helper function to get a list of memes on the page
        meme_list = self._get_memes_helper(page_num)

        for i in range(n):
            self._meme_pool.add(meme_list[i])
            self._meme_deque.appendleft(meme_list[i])
Project: meme_get    Author: memegen    | Project source | File source
def getbounds(area):
    xmin = area[0][0]
    xmax = area[0][0]
    ymin = area[0][1]
    ymax = area[0][1]
    for i in range(0, len(area)):
        if area[i][0] < xmin:
            xmin = area[i][0]
        if area[i][1] < ymin:
            ymin = area[i][1]
        if area[i][0] > xmax:
            xmax = area[i][0]
        if area[i][1] > ymax:
            ymax = area[i][1]
    return xmin - 1, ymin - 1, xmax + 1, ymax + 1
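
The returned box is the axis-aligned bounding box of the points, padded by one pixel on each side; for example:

print(getbounds([(0, 0), (2, 3), (1, 5)]))  # (-1, -1, 3, 6)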

# draw a boundary
Project: meme_get    Author: memegen    | Project source | File source
def normalize(scores):
    scores = sorted(scores, key=lambda x: x[1], reverse=True)

    ns = sorted(scores, key=lambda x: x[1], reverse=True)

    for i in range(0, len(scores)):

        if scores[i][1] <= 0 or scores[0][1] == 0:
            ns[i] = (scores[i][0], 0)
        else:
            n = old_div((scores[i][1] * 1.0), scores[0][1])
            ns[i] = (scores[i][0], old_div(int(n * 1000), 1000.0))

    return ns
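
old_div here comes from past.utils (the future package) and, for these arguments, behaves like ordinary float division: scores are scaled so the top score maps to 1.0 and the rest are truncated to three decimals. For example:

from past.utils import old_div  # dependency assumed by the snippet

print(normalize([('a', 10), ('b', 5), ('c', 0)]))
# [('a', 1.0), ('b', 0.5), ('c', 0)]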

# print OCR result
Project: yt8m    Author: forwchen    | Project source | File source
def to_csv_row(json_data):

  video_id = json_data["video_id"]

  class_indexes = json_data["class_indexes"]
  predictions = json_data["predictions"]

  if isinstance(video_id, list):
    video_id = video_id[0]
    class_indexes = class_indexes[0]
    predictions = predictions[0]

  if len(class_indexes) != len(predictions):
    raise ValueError(
        "The number of indexes (%s) and predictions (%s) must be equal." 
        % (len(class_indexes), len(predictions)))

  return (video_id.decode('utf-8') + "," + " ".join("%i %f" % 
      (class_indexes[i], predictions[i]) 
      for i in range(len(class_indexes))) + "\n")
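
Given a decoded prediction record (the dict below is a made-up example; video_id must be bytes because of the decode call), the function emits one CSV submission line:

row = to_csv_row({
    "video_id": b"vid001",
    "class_indexes": [3, 7],
    "predictions": [0.9, 0.05],
})
print(row)  # "vid001,3 0.900000 7 0.050000\n"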
Project: nn-patterns    Author: pikinder    | Project source | File source
def _update_statistics(self, new_stats, stats):
        new_stats = create_dict(new_stats)
        if stats is None:
            stats = new_stats
            return stats

        # update the stats layerwise
        for l_i in range(len(stats)):

            for subtype,_ in subtypes:
                # TODO: Have to check the type to see if this is needed
                cnt_old = 1.0 * stats[l_i][subtype]['cnt']
                stats[l_i][subtype]['cnt'] = (stats[l_i][subtype]['cnt']
                                              + new_stats[l_i][subtype]['cnt'])
                norm = np.maximum(stats[l_i][subtype]['cnt'], 1.0)

                for key in subtype_keys:
                    if key not in subtype_keys_no_aggregation:
                        tmp_old = cnt_old / norm * stats[l_i][subtype][key]
                        tmp_new = (new_stats[l_i][subtype]['cnt']
                                   / norm * new_stats[l_i][subtype][key])
                        stats[l_i][subtype][key] = tmp_old + tmp_new
        return stats
Project: nn-patterns    Author: pikinder    | Project source | File source
def _update_statistics(self, new_stats, stats):
        new_stats = create_dict(new_stats)
        if stats is None:
            stats = new_stats
            return stats

        # update the stats layerwise
        for l_i in range(len(stats)):

            for subtype,_ in subtypes:
                # TODO: Have to check the type to see if this is needed
                cnt_old = 1.0 * stats[l_i][subtype]['cnt']
                stats[l_i][subtype]['cnt'] = (stats[l_i][subtype]['cnt']
                                              + new_stats[l_i][subtype]['cnt'])
                norm = np.maximum(stats[l_i][subtype]['cnt'], 1.0)

                for key in subtype_keys:
                    if key not in subtype_keys_no_aggregation:
                        tmp_old = cnt_old / norm * stats[l_i][subtype][key]
                        tmp_new = (new_stats[l_i][subtype]['cnt']
                                   / norm * new_stats[l_i][subtype][key])
                        stats[l_i][subtype][key] = tmp_old + tmp_new
        return stats
Project: NuGridPy    Author: NuGrid    | Project source | File source
def _readPPN(self, fname, sldir):
        '''
        Private method that reads in and organizes the .ppn file
        Loads the data of the .ppn file into the variable cols.

        '''
        if sldir.endswith(os.sep):
                    #Making sure fname will be formatted correctly
            fname = str(sldir)+str(fname)
        else:
            fname = str(sldir)+os.sep+str(fname)
            self.sldir+=os.sep
        f=open(fname,'r')
        lines=f.readlines()
        for i in range(len(lines)):
            lines[i]=lines[i].strip()

        cols = ['ISOTP', 'ABUNDANCE_MF'] #These are constant, .ppn files have no header to read from
        for i in range(len(lines)):
            if not lines[i].startswith('H'):
                index = i-1
                break

        return cols, index
Project: NuGridPy    Author: NuGrid    | Project source | File source
def get_svnpath():
    '''
    This subroutine gives back the path of the whole svn tree
    installation, which is necessary for the script to run.

    '''
    svnpathtmp = __file__
    splitsvnpath = svnpathtmp.split('/')
    if len(splitsvnpath) == 1:
        svnpath = os.path.abspath('.') + '/../../'
    else:
        svnpath = ''
        for i in range(len(splitsvnpath)-3):
            svnpath += splitsvnpath[i] + '/'
    return svnpath



############################
##### BIG PREPROCESSOR #####
############################
# subroutine that reads in data and splits into nice numpy arrays
Project: NuGridPy    Author: NuGrid    | Project source | File source
def load_chart_files(path = '.'):
    n = 39
    nsparsity = 10
    for cycle in range(0,n,nsparsity):
        cycle_str = str(cycle).zfill(2)
        os.system("wget -q --content-disposition --directory '" + path + "' "
                  +"'http://www.canfar.phys.uvic.ca/vospace/synctrans?TARGET="\
                  +"vos%3A%2F%2Fcadc.nrc.ca%21vospace%2Fnugrid%2Fdata%2Fprojects%2Fppn%2Fexamples%2F"\
                  +"ppn_Hburn_simple%2Fiso_massf000"+cycle_str+".DAT&DIRECTION=pullFromVoSpace&PROTOCOL"\
                  "=ivo%3A%2F%2Fivoa.net%2Fvospace%2Fcore%23httpget'")
        os.system("wget -q --content-disposition --directory '" + path + "' "
                  +"'http://www.canfar.phys.uvic.ca/vospace/synctrans?TARGET="\
                  +"vos%3A%2F%2Fcadc.nrc.ca%21vospace%2Fnugrid%2Fdata%2Fprojects%2Fppn%2Fexamples%2F"\
                  +"ppn_Hburn_simple%2FMasterAbuChart"+cycle_str+".png&DIRECTION=pullFromVoSpace&PROTOCOL"\
                  "=ivo%3A%2F%2Fivoa.net%2Fvospace%2Fcore%23httpget'")
    a=p.abu_vector(path)
    a.abu_chart(list(range(0,n,nsparsity)),plotaxis=[-1,16,-1,15], savefig=True, path=path)
Project: NuGridPy    Author: NuGrid    | Project source | File source
def compare_images(path = '.'):
     S_limit = 10.
     file_list = glob.glob(os.path.join(path, 'Abu*'))
     file_list_master = glob.glob(os.path.join(path, 'MasterAbu*'))
     file_list.sort()
     file_list_master.sort()
     S=[]
     print("Identifying images with rmq > "+'%3.1f'%S_limit)
     ierr_count = 0
     for i in range(len(file_list)):
         this_S,fimg1,fimg2 = compare_entropy(file_list[i],file_list_master[i])
         if this_S > S_limit:
              warnings.warn(file_list[i]+" and "+file_list_master[i]+" differ by "+'%6.3f'%this_S)
              ierr_count += 1
              S.append(this_S)
     if ierr_count > 0:
          print("Error: at least one image differs by more than S_limit")
          sys.exit(1)
     #print ("S: ",S)
     #plb.plot(S,'o')
     #plb.xlabel("image number")
     #plb.ylabel("modified log KL-divergence to previous image")
     #plb.show()
Project: NuGridPy    Author: NuGrid    | Project source | File source
def plot_prof_2(self, mod, species, xlim1, xlim2):

        """
        Plot one species for cycle between xlim1 and xlim2

        Parameters
        ----------
        mod : string or integer
            Model to plot, same as cycle number.
        species : list
            Which species to plot.
        xlim1, xlim2 : float
            Mass coordinate range.

        """

        mass=self.se.get(mod,'mass')
        Xspecies=self.se.get(mod,'yps',species)
        pyl.plot(mass,Xspecies,'-',label=str(mod)+', '+species)
        pyl.xlim(xlim1,xlim2)
        pyl.legend()
Project: NuGridPy    Author: NuGrid    | Project source | File source
def comparator(self, x, y):
        '''
        simple comparator method

        '''

        indX=0
        indY=0
        for i in range(len(self.stable_names)):
            if self.stable_names[i] == x[0].split('-')[0]:
                indX=i
            if self.stable_names[i] == y[0].split('-')[0]:
                indY=i

        if indX>indY:
            return 1
        if indX==indY:
            return 0
        if indX<indY:
            return -1
Project: NuGridPy    Author: NuGrid    | Project source | File source
def iso_abundance(self,isos):
        '''
        This routine returns the abundance of a specific isotope.
        Isotope given as, e.g., 'Si-28' or as list
        ['Si-28','Si-29','Si-30']

        '''
        if type(isos) == list:
            dumb = []
            for it in range(len(isos)):
                dumb.append(isos[it].split('-'))
            ssratio = []
            isos = dumb
            for it in range(len(isos)):
                ssratio.append(self.habu[isos[it][0].ljust(2).lower() + str(int(isos[it][1])).rjust(3)])
        else:
            isos = isos.split('-')
            ssratio = self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)]
        return ssratio
Project: opentracing-python-instrumentation    Author: uber-common    | Project source | File source
def singleton(func):
    """
    This decorator allows you to make sure that a function is called once and
    only once. Note that recursive functions will still work.

    Not thread-safe.
    """
    NOT_CALLED, IN_CALL, CALLED = list(range(3))

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if wrapper.__call_state__ == CALLED:
            return
        old_state, wrapper.__call_state__ = wrapper.__call_state__, IN_CALL
        ret = func(*args, **kwargs)
        if old_state == NOT_CALLED:
            wrapper.__call_state__ = CALLED
        return ret

    wrapper.__call_state__ = NOT_CALLED
    # save original func to be able to patch and restore multiple times from
    # unit tests
    wrapper.__original_func = func
    return wrapper
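
A short usage sketch (the setup function is hypothetical; the snippet's module is assumed to import functools):

@singleton
def setup():
    print("initializing")
    return 42

setup()  # prints "initializing" and returns 42
setup()  # second call short-circuits: returns None, nothing printed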
Project: PyDMD    Author: mathLab    | Project source | File source
def reconstructed_data(self):
        """
        Get the reconstructed data.

        :return: the matrix that contains the reconstructed snapshots.
        :rtype: numpy.ndarray
        """
        data = np.sum(
            np.array(
                [
                    self.partial_reconstructed_data(i)
                    for i in range(self.max_level)
                ]
            ),
            axis=0
        )
        return data
Project: PyDMD    Author: mathLab    | Project source | File source
def partial_modes(self, level, node=None):
        """
        Return the modes at the specific `level` and at the specific `node`; if
        `node` is not specified, the method returns all the modes of the given
        `level` (all the nodes).

        :param int level: the index of the level from which the modes are
            extracted.
        :param int node: the index of the node from which the modes are
            extracted; if None, the modes are extracted from all the nodes of
            the given level. Default is None.
        """
        if node:
            return self._modes[self._index_list(level, node)]

        indeces = [self._index_list(level, i) for i in range(2**level)]
        return np.hstack(tuple([self._modes[idx] for idx in indeces]))
Project: PyDMD    Author: mathLab    | Project source | File source
def partial_eigs(self, level, node=None):
        """
        Return the eigenvalues of the specific `level` and of the specific
        `node`; if `node` is not specified, the method returns the eigenvalues
        of the given `level` (all the nodes).

        :param int level: the index of the level from which the eigenvalues
            are extracted.
        :param int node: the index of the node from which the eigenvalues are
            extracted; if None, the eigenvalues are extracted from all the
            nodes of the given level. Default is None.
        """
        if level >= self.max_level:
            raise ValueError(
                'The level input parameter ({}) has to be less than the max_level ({}). '
                'Remember that the starting index is 0'.format(
                    level, self.max_level
                )
            )
        if node:
            return self._eigs[self._index_list(level, node)]

        indeces = [self._index_list(level, i) for i in range(2**level)]
        return np.concatenate([self._eigs[idx] for idx in indeces])
Project: mlc2017-online    Author: machine-learning-challenge    | Project source | File source
def to_csv_row(json_data):

  image_id = json_data["image_id"]

  class_indexes = json_data["class_indexes"]
  predictions = json_data["predictions"]

  if isinstance(image_id, list):
    image_id = image_id[0]
    class_indexes = class_indexes[0]
    predictions = predictions[0]

  if len(class_indexes) != len(predictions):
    raise ValueError(
        "The number of indexes (%s) and predictions (%s) must be equal." 
        % (len(class_indexes), len(predictions)))

  return (image_id.decode('utf-8') + "," + " ".join("%i %f" %
      (class_indexes[i], predictions[i]) 
      for i in range(len(class_indexes))) + "\n")
Project: tfs    Author: geevi    | Project source | File source
def sequential(x, net, defaults = {}, name = '', reuse = None, var = {}, layers = {}):
    layers = dict(list(layers.items()) + list(predefined_layers.items()))
    y = x
    logging.info('Building Sequential Network : %s', name)

    with tf.variable_scope(name, reuse = reuse):
        for i in range(len(net)):
            ltype   = net[i][0]
            lcfg    = net[i][1] if len(net[i]) == 2 else {}
            lname   = lcfg.get('name', ltype + str(i))
            ldefs   = defaults.get(ltype, {})
            lcfg    = dict(list(ldefs.items()) + list(lcfg.items()))
            for k, v in list(lcfg.items()):
                if isinstance(v, basestring) and v[0] == '$':
                    # print var, v
                    lcfg[k] = var[v[1:]]
            y  = layers[ltype](y, lname, **lcfg)
            logging.info('\t %s \t %s', lname, y.get_shape().as_list())
        return y
Project: youtube-8m    Author: google    | Project source | File source
def to_csv_row(json_data):

  video_id = json_data["video_id"]

  class_indexes = json_data["class_indexes"]
  predictions = json_data["predictions"]

  if isinstance(video_id, list):
    video_id = video_id[0]
    class_indexes = class_indexes[0]
    predictions = predictions[0]

  if len(class_indexes) != len(predictions):
    raise ValueError(
        "The number of indexes (%s) and predictions (%s) must be equal." 
        % (len(class_indexes), len(predictions)))

  return (video_id.decode('utf-8') + "," + " ".join("%i %f" % 
      (class_indexes[i], predictions[i]) 
      for i in range(len(class_indexes))) + "\n")
Project: veros    Author: dionhaefner    | Project source | File source
def run(self):
        a, b, c, d = (np.random.randn(self.nx, self.ny, self.nz) for _ in range(4))

        out_legacy = np.zeros((self.nx, self.ny, self.nz))
        for i in range(self.nx):
            for j in range(self.ny):
                out_legacy[i, j] = self.veros_legacy.fortran.solve_tridiag(
                    a=a[i,j], b=b[i,j], c=c[i,j], d=d[i,j], n=self.nz)

        if self.veros_new.backend_name == "bohrium":
            a, b, c, d = (bh.array(v) for v in (a, b, c, d))

        out_new = numerics.solve_tridiag(self.veros_new, a, b, c, d)
        passed = np.allclose(out_legacy, out_new)

        return passed
Project: transpyler    Author: Transpyler    | Project source | File source
def range(*args):
    """
    range(stop) -> range object
    range(start, stop[, step]) -> range object

    Return an object that produces a sequence of integers from start (inclusive)
    to stop (exclusive) by step.  range(i, j) produces i, i+1, i+2, ..., j-1.
    start defaults to 0, and stop is omitted!  range(4) produces 0, 1, 2, 3.
    These are exactly the valid indices for a list of 4 elements.
    When step is given, it specifies the increment (or decrement).
    """
    return _range(*args)


#
# Time control
#
Project: Video-Classification    Author: boyaolin    | Project source | File source
def to_csv_row(json_data):

  video_id = json_data["video_id"]

  class_indexes = json_data["class_indexes"]
  predictions = json_data["predictions"]

  if isinstance(video_id, list):
    video_id = video_id[0]
    class_indexes = class_indexes[0]
    predictions = predictions[0]

  if len(class_indexes) != len(predictions):
    raise ValueError(
        "The number of indexes (%s) and predictions (%s) must be equal." 
        % (len(class_indexes), len(predictions)))

  return (video_id.decode('utf-8') + "," + " ".join("%i %f" % 
      (class_indexes[i], predictions[i]) 
      for i in range(len(class_indexes))) + "\n")
Project: KiField    Author: xesscorp    | Project source | File source
def explode(collapsed):
    '''Explode references like 'C1-C3,C7,C10-C13' into [C1,C2,C3,C7,C10,C11,C12,C13]'''

    if collapsed == '':
        return []
    individual_refs = []
    if isinstance(collapsed, str) or isinstance(collapsed, basestring):
        range_refs = re.split(',|;', collapsed)
        for r in range_refs:
            mtch = re.match(
                '^\s*(?P<part_prefix>\D+)(?P<range_start>\d+)\s*[-:]\s*\\1(?P<range_end>\d+)\s*$',
                r)
            if mtch is None:
                individual_refs.append(r.strip())
            else:
                part_prefix = mtch.group('part_prefix')
                range_start = int(mtch.group('range_start'))
                range_end = int(mtch.group('range_end'))
                for i in range(range_start, range_end + 1):
                    individual_refs.append(part_prefix + str(i))
    logger.log(DEBUG_OBSESSIVE, 'Exploding {} => {}.'.format(collapsed,
                                                             individual_refs))
    return individual_refs
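
Running the call from the docstring (the snippet also assumes the module's logger and DEBUG_OBSESSIVE level are defined):

print(explode('C1-C3,C7,C10-C13'))
# ['C1', 'C2', 'C3', 'C7', 'C10', 'C11', 'C12', 'C13']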
Project: mdk    Author: datawire    | Project source | File source
def test_activeDoesNotDisableCircuitBreaker(self):
        """
        If a Node has been disabled by a CircuitBreaker, then NodeActive with the
        same Node doesn't re-enable it.
        """
        disco = create_disco()
        node = create_node("somewhere")
        disco.onMessage(None, NodeActive(node))
        resolved_node = resolve(disco, "myservice", "1.0")
        # Uh-oh it's a pretty broken node:
        for i in range(10):
            resolved_node.failure()

        node = create_node("somewhere")
        disco.onMessage(None, NodeActive(node))
        resolved_node2 = resolve(disco, "myservice", "1.0")
        self.assertEqual(resolved_node2, None)
        resolved_node.success()
        self.assertNodesEqual(resolve(disco, "myservice", "1.0"), node)
Project: mdk    Author: datawire    | Project source | File source
def test_replaceDoesNotDisableCircuitBreaker(self):
        """
        If a Node has been disabled by a CircuitBreaker, then ReplaceCluster with
        the same Node doesn't re-enable it.
        """
        disco = create_disco()
        node = create_node("somewhere")
        disco.onMessage(None, NodeActive(node))
        resolved_node = resolve(disco, "myservice", "1.0")
        # Uh-oh it's a pretty broken node:
        for i in range(10):
            resolved_node.failure()

        node = create_node("somewhere")
        disco.onMessage(None, ReplaceCluster("myservice",
                                             SANDBOX_ENV,
                                             [node]))
        resolved_node2 = resolve(disco, "myservice", "1.0")
        self.assertEqual(resolved_node2, None)
        resolved_node.success()
        self.assertNodesEqual(resolve(disco, "myservice", "1.0"), node)
Project: mdk    Author: datawire    | Project source | File source
def test_environmentInheritance(self):
        """
        If an Environment has a parent, the parent is checked when no Nodes with
        that service name have ever been registered in the child Environment.
        """
        # In the parent only
        node = create_node("somewhere", "myservice", "parent")
        # In the child
        node2 = create_node("somewhere2", "myservice2", "parent:child")
        disco = create_disco()
        disco.onMessage(None, NodeActive(node))
        disco.onMessage(None, NodeActive(node2))
        # Do repeatedly in case round robin is somehow tricking us:
        for i in range(10):
            self.assertEqual(resolve(disco, "myservice", "1.0", "parent:child").address,
                             "somewhere")
        for i in range(10):
            self.assertEqual(resolve(disco, "myservice2", "1.0", "parent:child").address,
                             "somewhere2")
Project: mdk    Author: datawire    | Project source | File source
def test_successReset(self):
        """
        A successful connection resets the threshold for a Node becoming
        unavailable.
        """
        for i in range(3):
            self.circuit_breaker.failure()
        self.circuit_breaker.success()
        available0 = self.circuit_breaker.available()
        self.circuit_breaker.failure()
        available1 = self.circuit_breaker.available()
        self.circuit_breaker.failure()
        available2 = self.circuit_breaker.available()
        self.circuit_breaker.failure()
        available3 = self.circuit_breaker.available()
        available4 = self.circuit_breaker.available()
        self.assertEqual((available0, available1, available2, available3, available4),
                         (True, True, True, False, False))
Project: mdk    Author: datawire    | Project source | File source
def testResolveBreaker(self):
        disco = self.createDisco()
        sev = self.startDisco()

        n1 = self.doActive(sev, "svc", "addr1", "1.0.0")
        n2 = self.doActive(sev, "svc", "addr2", "1.0.0")

        p = disco.resolve("svc", "1.0", SANDBOX_ENV)
        self.assertEqualNodes(n1, p.value().getValue())
        p = disco.resolve("svc", "1.0", SANDBOX_ENV)
        self.assertEqualNodes(n2, p.value().getValue())

        failed = p.value().getValue()
        fpfactory = disco._fpfactory
        for idx in range(fpfactory.threshold):
            failed.failure()

        p = disco.resolve("svc", "1.0", SANDBOX_ENV)
        self.assertEqualNodes(n1, p.value().getValue())
        p = disco.resolve("svc", "1.0", SANDBOX_ENV)
        self.assertEqualNodes(n1, p.value().getValue())
Project: mdk    Author: datawire    | Project source | File source
def testLoadBalancing(self):
        disco = self.createDisco()
        sev = self.startDisco()

        promise = disco.resolve("svc", "1.0", SANDBOX_ENV)
        self.assertEqual(False, promise.value().hasValue())

        active = Active()

        count = 10
        for idx in range(count):
            active.node = Node()
            active.node.id = str(uuid4())
            active.node.service = "svc"
            active.node.address = "addr" + str(idx)
            active.node.version = "1.2.3"
            sev.send(active.encode())
            idx = idx + 1
        self.connector.pump()

        for idx in range(count*10):
            node = disco.resolve("svc", "1.0", SANDBOX_ENV).value().getValue()
            assert node is not None
            self.assertEqual("addr" + str(idx % count), node.address)
Project: pefile-tests    Author: viper-framework    | Project source | File source
def test_selective_loading_integrity(self):
        """Verify integrity of loading the separate elements of the file as
        opposed to do a single pass.
        """

        control_file = os.path.join(REGRESSION_TESTS_DIR, 'MSVBVM60.DLL')
        pe = pefile.PE(control_file, fast_load=True)
        # Load the 16 directories.
        pe.parse_data_directories(directories=list(range(0x10)))

        # Do it all at once.
        pe_full = pefile.PE(control_file, fast_load=False)

        # Verify both methods obtained the same results.
        self.assertEqual(pe_full.dump_info(), pe.dump_info())

        pe.close()
        pe_full.close()
Project: ngraph    Author: NervanaSystems    | Project source | File source
def __init__(self, stage_depth):
        nfms = [2**(stage + 4) for stage in sorted(list(range(3)) * stage_depth)]
        print(nfms)
        strides = [1 if cur == prev else 2 for cur, prev in zip(nfms[1:], nfms[:-1])]

        layers = [Preprocess(functor=cifar_mean_subtract),
                  Convolution(**conv_params(3, 16)),
                  f_module(nfms[0], first=True)]

        for nfm, stride in zip(nfms[1:], strides):
            layers.append(f_module(nfm, strides=stride))

        layers.append(BatchNorm())
        layers.append(Activation(Rectlin()))
        layers.append(Pooling((8, 8), pool_type='avg'))
        layers.append(Affine(axes=ax.Y,
                             weight_init=KaimingInit(),
                             activation=Softmax()))
        super(residual_network, self).__init__(layers=layers)
Project: ngraph    Author: NervanaSystems    | Project source | File source
def get_random_shape(max_num_axes, max_axis_length):
    """
    TODO.

    Arguments:
      max_num_axes: TODO
      max_axis_length: TODO

    Returns:
      TODO
    """
    assert max_num_axes >= 2
    num_axes = 0

    while num_axes < 2:
        num_axes = random.randint(0, max_num_axes)
        shape = ()
        for i in range(num_axes):
            shape += (random.randint(0, max_axis_length),)
    return shape
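
For example (seeding the random module makes the draw repeatable; the exact tuple shown is only illustrative):

import random

random.seed(42)
shape = get_random_shape(max_num_axes=4, max_axis_length=8)
print(shape)  # a tuple with at least two axes, e.g. (5, 3, 1)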
Project: ngraph    Author: NervanaSystems    | Project source | File source
def create_vgg9_model(input, out_dims):
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            C.layers.For(range(3), lambda i: [
                C.layers.Convolution(
                    (3, 3), [64, 96, 128][i], init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.Convolution(
                    (3, 3), [64, 96, 128][i], init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2))
            ]),
            C.layers.For(range(2), lambda: [
                C.layers.Dense(1024, init=C.initializer.glorot_uniform())
            ]),
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        ])
    return model(input)
Project: ngraph    Author: NervanaSystems    | Project source | File source
def from_other(self, tensor, dest=None):
        """
        Copies from another GPUArray with the same dimensions into this tensor. Handles
        discontiguous strides.

        Arguments:
            tensor (GPUArray): Contiguous tensor with same dimensions to use as source
        """
        if dest is None:
            dest = self.tensor

        src_strides = [s // tensor.dtype.itemsize for s in tensor.strides]
        dst_strides = [s // dest.dtype.itemsize for s in dest.strides]
        kernel = _get_copy_transpose_kernel(tensor.dtype,
                                            tensor.shape,
                                            range(len(tensor.shape)))
        params = [dest.gpudata, tensor.gpudata] + list(kernel.args)
        params = params + src_strides + dst_strides
        kernel.prepared_async_call(kernel.grid, kernel.block, None, *params)
Project: carto-etl    Author: CartoDB    | Project source | File source
def chunks(full_list, chunk_size, start_chunk=1, end_chunk=None):
    finished = False
    while finished is False:
        chunk = []
        for chunk_num in range(chunk_size):
            if chunk_num < (start_chunk - 1):
                continue

            if end_chunk is not None and chunk_num >= end_chunk:
                return

            try:
                chunk.append(next(full_list))
            except StopIteration:
                finished = True
                if len(chunk) > 0:
                    continue
                else:
                    return
        yield chunk
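
Note that full_list must be an iterator (next() is called on it directly), not a plain list. With the default bounds the generator slices it into fixed-size chunks, the last one possibly short:

print(list(chunks(iter(range(10)), 4)))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]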
Project: carto-etl    Author: CartoDB    | Project source | File source
def send(self, query, file_encoding, chunk_num):
        if sys.version_info <= (3, 0):
            query = query.decode(file_encoding).encode(UTF8)
        logger.debug("Chunk #{chunk_num}: {query}".
                    format(chunk_num=(chunk_num + 1), query=query))
        for retry in range(self.max_attempts):
            try:
                self.sql.send(query)
            except Exception as e:
                logger.warning("Chunk #{chunk_num}: Retrying ({error_msg})".
                               format(chunk_num=(chunk_num + 1), error_msg=e))
                self.notify('error', e)
            else:
                logger.info("Chunk #{chunk_num}: Success!".
                            format(chunk_num=(chunk_num + 1)))
                self.notify('progress', chunk_num + 1)
                break
        else:
            logger.error("Chunk #{chunk_num}: Failed!".
                         format(chunk_num=(chunk_num + 1)))
            self.notify('error', "Failed " + str(chunk_num + 1))
Project: Youtube-8M-WILLOW    Author: antoine77340    | Project source | File source
def to_csv_row(json_data):

  video_id = json_data["video_id"]

  class_indexes = json_data["class_indexes"]
  predictions = json_data["predictions"]

  if isinstance(video_id, list):
    video_id = video_id[0]
    class_indexes = class_indexes[0]
    predictions = predictions[0]

  if len(class_indexes) != len(predictions):
    raise ValueError(
        "The number of indexes (%s) and predictions (%s) must be equal." 
        % (len(class_indexes), len(predictions)))

  return (video_id.decode('utf-8') + "," + " ".join("%i %f" % 
      (class_indexes[i], predictions[i]) 
      for i in range(len(class_indexes))) + "\n")
Project: awslogin    Author: byu-oit    | Project source | File source
def __prompt_for_role(account_name, role_names):
    border = ""
    spaces = ""
    for index in range(len(account_name)):
        border = "-" + border
        spaces = " " + spaces

    print('{}#------------------------------------------------{}#'.format(Colors.lblue,border))
    print('#   {}You have access to the following roles in {}{}{}   #'.format(Colors.white,Colors.yellow,account_name,Colors.lblue))
    print('#   {}Which role would you like to assume?{}{}         #'.format(Colors.white,Colors.lblue,spaces))
    print('#------------------------------------------------{}#{}'.format(border,Colors.normal))

    for index, role_name in enumerate(role_names):
        if role_name == "AccountAdministrator":
            print("\t{}{}  {}{}".format(Colors.red, str(index).rjust(2), role_name,Colors.normal))
        else:
            print("\t{}{}{}  {}{}".format(Colors.white, str(index).rjust(2), Colors.cyan, role_name, Colors.normal))

    while True:
        choice = input('{}Select role: {}'.format(Colors.lblue, Colors.normal))
        try:
            return role_names[int(choice)]
        except:
            maximum = len(role_names) - 1
            print('{}Please enter an integer between 0 and {}{}'.format(Colors.lred, maximum, Colors.normal))
Project: fastxml    Author: Refefer    | Project source | File source
def sparse_rows_iter(sparse):
    indptr, indices, data = sparse.indptr, sparse.indices, sparse.data
    for startIdx in range(indptr.shape[0] - 1):
        start, stop = indptr[startIdx], indptr[startIdx+1]

        sparse_lines = []
        for i in range(start, stop):
            sparse_lines.append(indices[i])
            sparse_lines.append(data[i])

        # Pack into struct
        n = stop - start
        size = struct.pack('I', n)
        rest = struct.pack('If' * n, *sparse_lines)

        yield size + rest
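
Each yielded record is a packed pair count followed by interleaved (index, value) pairs; a small round-trip check (struct and scipy assumed imported as in the snippet's module):

import struct

import scipy.sparse as sp

m = sp.csr_matrix([[1.0, 0.0, 2.0],
                   [0.0, 3.0, 0.0]])
for row in sparse_rows_iter(m):
    (n,) = struct.unpack_from('I', row)
    print(n, struct.unpack_from('If' * n, row, 4))
# 2 (0, 1.0, 2, 2.0)
# 1 (1, 3.0)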
Project: fastxml    Author: Refefer    | Project source | File source
def generate_idxs(self, dataset_len):
        if self.subsample == 1:
            return repeat(list(range(dataset_len)))

        batch_size = int(dataset_len * self.subsample) \
                if self.subsample < 1 else self.subsample

        if batch_size > dataset_len:
            raise Exception("dataset subset is larger than dataset")

        def gen(bs):
            rs = np.random.RandomState(seed=self.seed + 1000)
            idxs = list(range(dataset_len))
            while True:
                rs.shuffle(idxs)
                yield idxs[:bs]

        return gen(batch_size)
Project: fastxml    Author: Refefer    | Project source | File source
def metric_cluster(y, weights=None, max_leaf_size=10, 
        sparse_multiple=25, seed=2016, verbose=False):

    rs = np.random.RandomState(seed=seed)
    n_labels = max(yi for ys in y for yi in ys) + 1
    if weights is None:
        weights = np.ones(n_labels, dtype='float32')

    # Initialize splitter
    splitter = Splitter(y, weights, sparse_multiple)

    def _metric_cluster(idxs):
        if verbose and len(idxs) > 1000:
            print("Splitting:", len(idxs))

        if len(idxs) < max_leaf_size:
            return MetricLeaf(idxs)

        left, right = splitter.split_node(idxs, rs)
        if not left or not right:
            return MetricLeaf(idxs)

        return MetricNode(_metric_cluster(left), _metric_cluster(right))

    return _metric_cluster(list(range(len(y))))
Project: fastxml    Author: Refefer    | Project source | File source
def predict(self, X, fmt='sparse'):
        assert fmt in ('sparse', 'dict')
        s = []
        num = X.shape[0] if isinstance(X, sp.csr_matrix) else len(X)
        for i in range(num):
            Xi = X[i]
            mean = self.predictor.predict(Xi.data, Xi.indices, 
                    self.blend, self.gamma, self.leaf_probs)

            if fmt == 'sparse':
                s.append(mean)

            else:
                od = OrderedDict()
                for idx in reversed(mean.data.argsort()):
                    od[mean.indices[idx]] = mean.data[idx]

                s.append(od)

        if fmt == 'sparse':
            return sp.vstack(s)

        return s
Project: hakkuframework    Author: 4shadoww    | Project source | File source
def issubset(list1, list2):
    """
    Examples:

    >>> issubset([], [65, 66, 67])
    True
    >>> issubset([65], [65, 66, 67])
    True
    >>> issubset([65, 66], [65, 66, 67])
    True
    >>> issubset([65, 67], [65, 66, 67])
    False
    """
    n = len(list1)
    for startpos in range(len(list2) - n + 1):
        if list2[startpos:startpos+n] == list1:
            return True
    return False
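
The Examples block doubles as a doctest suite, so the function can be checked by running python -m doctest against its containing module.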