Python past.builtins module: xrange() example source code

We extracted the following 36 code examples from open-source Python projects to illustrate how to use past.builtins.xrange().
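
For orientation, here is a minimal sketch of the pattern shared by all the examples below: xrange is imported from past.builtins (part of the `future` package), so code written for Python 2 keeps its lazy range semantics when run on Python 3.

from past.builtins import xrange

# Lazy iteration: no list is materialized, even for large bounds.
total = 0
for i in xrange(1000000):
    total += i
print(total)  # 499999500000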

Project: selfMachineLearning    Author: xhappy    | Project source | File source
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
  """
  Sample a few random elements and only return the numerical
  gradient in those dimensions.
  """

  for i in xrange(num_checks):
    ix = tuple([randrange(m) for m in x.shape])

    oldval = x[ix]
    x[ix] = oldval + h # increment by h
    fxph = f(x) # evaluate f(x + h)
    x[ix] = oldval - h # decrement by h
    fxmh = f(x) # evaluate f(x - h)
    x[ix] = oldval # reset

    grad_numerical = (fxph - fxmh) / (2 * h)
    grad_analytic = analytic_grad[ix]
    rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))
    print('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))
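
A hedged usage sketch of the helper above: the names W, X_dev, and y_dev are hypothetical stand-ins, and svm_loss_naive is the loss function shown further down this page; none of this is part of the original project.

import numpy as np
from random import randrange  # grad_check_sparse uses randrange internally

W = np.random.randn(3073, 10) * 0.0001             # hypothetical weights
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)  # analytic gradient
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_check_sparse(f, W, grad)  # prints per-element relative errors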
Project: ML_algorithm    Author: luoshao23    | Project source | File source
def compute_distances_one_loop(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using a single loop over the test data.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in xrange(num_test):
      #######################################################################
      # TODO:                                                               #
      # Compute the l2 distance between the ith test point and all training #
      # points, and store the result in dists[i, :].                        #
      #######################################################################
      pass
      #######################################################################
      #                         END OF YOUR CODE                            #
      #######################################################################
    return dists
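
One possible way to fill in the TODO above, as a standalone sketch (assuming NumPy inputs of shape (num_test, D) and (num_train, D); this is not the project's own solution):

import numpy as np
from past.builtins import xrange

def compute_distances_one_loop_filled(X, X_train):
    # X: (num_test, D), X_train: (num_train, D)
    num_test, num_train = X.shape[0], X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in xrange(num_test):
        # Broadcasting gives the L2 distance from test point i to
        # every training point in one vectorized expression.
        dists[i, :] = np.sqrt(np.sum((X_train - X[i]) ** 2, axis=1))
    return dists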
Project: monkeys    Author: hchasestevens    | Project source | File source
def build_tree(return_type, allowed_functions=None, convert=True, selection_strategy=None):
    if allowed_functions is not None:
        allowed_functions = frozenset(allowed_functions)
    starting_functions = find_functions(return_type, allowed_functions, convert)
    for __ in xrange(99999):
        try:
            return Node(
                random.choice(starting_functions), 
                allowed_functions=allowed_functions,
                selection_strategy=selection_strategy,
            )
        except RuntimeError:
            pass
    raise TreeConstructionError(
        "Unable to construct program, consider raising recursion depth limit."
    )
Project: LTTL    Author: axanthos    | Project source | File source
def to_numpy(self):
        """Return a numpy array with the content of a crosstab"""

        # Set numpy table type based on the crosstab's type...
        if isinstance(self, IntPivotCrosstab):
            np_type = np.dtype(np.int32)
        elif isinstance(self, PivotCrosstab):
            np_type = np.dtype(np.float32)

        # Initialize numpy table...
        np_table = np.empty([len(self.row_ids), len(self.col_ids)], np_type)
        np_table.fill(self.missing or 0)

        # Fill and return numpy table...
        for row_idx in xrange(len(self.row_ids)):
            for col_idx in xrange(len(self.col_ids)):
                try:
                    np_table[row_idx][col_idx] = self.values[
                        (self.row_ids[row_idx], self.col_ids[col_idx])
                    ]
                except KeyError:
                    pass
        return np_table

    # TODO: test.
Project: lib-gatilegrid    Author: geoadmin    | Project source | File source
def iterGrid(self, minZoom, maxZoom):
        "Yields the tileBounds, zoom, tileCol and tileRow"
        assert minZoom in range(0, len(self.RESOLUTIONS))
        assert maxZoom in range(0, len(self.RESOLUTIONS))
        assert minZoom <= maxZoom

        for zoom in xrange(minZoom, maxZoom + 1):
            [minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom)
            for row in xrange(minRow, maxRow + 1):
                for col in xrange(minCol, maxCol + 1):
                    tileBounds = self.tileBounds(zoom, col, row)
                    yield (tileBounds, zoom, col, row)
Project: lib-gatilegrid    Author: geoadmin    | Project source | File source
def totalNumberOfTiles(self, minZoom=None, maxZoom=None):
        "Return the total number of tiles for this instance extent"
        nbTiles = 0
        minZoom = minZoom or 0
        if maxZoom:
            maxZoom = maxZoom + 1
        else:
            maxZoom = len(self.RESOLUTIONS)
        for zoom in xrange(minZoom, maxZoom):
            nbTiles += self.numberOfTilesAtZoom(zoom)
        return nbTiles
Project: lib-gatilegrid    Author: geoadmin    | Project source | File source
def __iter__(self):
        for col in xrange(0, self.nbCellsX):
            for row in xrange(0, self.nbCellsY):
                cellExtent = self.cellExtent(col, row)
                yield (cellExtent, col, row)
Project: Vector-Tiles-Reader-QGIS-Plugin    Author: geometalab    | Project source | File source
def _chunker(self, seq, size):
        return [seq[pos:pos + size] for pos in xrange(0, len(seq), size)]
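
A quick usage sketch of the chunking idiom above, lifted out of the class so it runs standalone (the last chunk may be shorter than `size`):

from past.builtins import xrange

def chunker(seq, size):
    return [seq[pos:pos + size] for pos in xrange(0, len(seq), size)]

print(chunker([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]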
Project: selfMachineLearning    Author: xhappy    | Project source | File source
def visualize_grid(Xs, ubound=255.0, padding=1):
  """
  Reshape a 4D tensor of image data to a grid for easy visualization.

  Inputs:
  - Xs: Data of shape (N, H, W, C)
  - ubound: Output grid will have values scaled to the range [0, ubound]
  - padding: The number of blank pixels between elements of the grid
  """
  (N, H, W, C) = Xs.shape
  grid_size = int(ceil(sqrt(N)))
  grid_height = H * grid_size + padding * (grid_size - 1)
  grid_width = W * grid_size + padding * (grid_size - 1)
  grid = np.zeros((grid_height, grid_width, C))
  next_idx = 0
  y0, y1 = 0, H
  for y in xrange(grid_size):
    x0, x1 = 0, W
    for x in xrange(grid_size):
      if next_idx < N:
        img = Xs[next_idx]
        low, high = np.min(img), np.max(img)
        grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
        # grid[y0:y1, x0:x1] = Xs[next_idx]
        next_idx += 1
      x0 += W + padding
      x1 += W + padding
    y0 += H + padding
    y1 += H + padding
  # grid_max = np.max(grid)
  # grid_min = np.min(grid)
  # grid = ubound * (grid - grid_min) / (grid_max - grid_min)
  return grid
Project: ML_algorithm    Author: luoshao23    | Project source | File source
def compute_distances_two_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using a nested loop over both the training data and the 
    test data.

    Inputs:
    - X: A numpy array of shape (num_test, D) containing test data.

    Returns:
    - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      is the Euclidean distance between the ith test point and the jth training
      point.
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in xrange(num_test):
      for j in xrange(num_train):
        #####################################################################
        # TODO:                                                             #
        # Compute the l2 distance between the ith test point and the jth    #
        # training point, and store the result in dists[i, j]. You should   #
        # not use a loop over dimension.                                    #
        #####################################################################
        pass
        #####################################################################
        #                       END OF YOUR CODE                            #
        #####################################################################
    return dists
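
Again a possible fill-in for the TODO, as a sketch under the same shape assumptions (not the project's own solution):

import numpy as np
from past.builtins import xrange

def compute_distances_two_loops_filled(X, X_train):
    # X: (num_test, D), X_train: (num_train, D)
    num_test, num_train = X.shape[0], X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in xrange(num_test):
        for j in xrange(num_train):
            # Elementwise L2 distance between two D-dimensional points,
            # with no loop over the dimension itself.
            dists[i, j] = np.sqrt(np.sum((X[i] - X_train[j]) ** 2))
    return dists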
Project: ML_algorithm    Author: luoshao23    | Project source | File source
def predict_labels(self, dists, k=1):
    """
    Given a matrix of distances between test points and training points,
    predict a label for each test point.

    Inputs:
    - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      gives the distance between the ith test point and the jth training point.

    Returns:
    - y: A numpy array of shape (num_test,) containing predicted labels for the
      test data, where y[i] is the predicted label for the test point X[i].  
    """
    num_test = dists.shape[0]
    y_pred = np.zeros(num_test)
    for i in xrange(num_test):
      # A list of length k storing the labels of the k nearest neighbors to
      # the ith test point.
      closest_y = []
      #########################################################################
      # TODO:                                                                 #
      # Use the distance matrix to find the k nearest neighbors of the ith    #
      # testing point, and use self.y_train to find the labels of these       #
      # neighbors. Store these labels in closest_y.                           #
      # Hint: Look up the function numpy.argsort.                             #
      #########################################################################
      pass
      #########################################################################
      # TODO:                                                                 #
      # Now that you have found the labels of the k nearest neighbors, you    #
      # need to find the most common label in the list closest_y of labels.   #
      # Store this label in y_pred[i]. Break ties by choosing the smaller     #
      # label.                                                                #
      #########################################################################
      pass
      #########################################################################
      #                           END OF YOUR CODE                            # 
      #########################################################################

    return y_pred
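
A sketch of how both TODOs might be completed (assuming non-negative integer class labels in y_train; np.bincount(...).argmax() returns the first maximum, which breaks ties toward the smaller label as the comment asks). This is one possible solution, not the project's own:

import numpy as np
from past.builtins import xrange

def predict_labels_filled(dists, y_train, k=1):
    num_test = dists.shape[0]
    y_pred = np.zeros(num_test)
    for i in xrange(num_test):
        # Labels of the k nearest training points for test point i.
        closest_y = y_train[np.argsort(dists[i])[:k]]
        # Most common label; bincount + argmax returns the first (and
        # therefore smallest) label on ties, as the comment requires.
        y_pred[i] = np.bincount(closest_y).argmax()
    return y_pred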
Project: ML_algorithm    Author: luoshao23    | Project source | File source
def softmax_loss_vectorized(W, X, y, reg):
  """
  Softmax loss function, vectorized version.

  Inputs and outputs are the same as softmax_loss_naive.
  """
  # Initialize the loss and gradient to zero.
  num_train = X.shape[0]
  loss = 0.0
  dW = np.zeros_like(W)

  #############################################################################
  # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
  # Store the loss in loss and the gradient in dW. If you are not careful     #
  # here, it is easy to run into numeric instability. Don't forget the        #
  # regularization!                                                           #
  #############################################################################
  scores = X.dot(W)
  scores -= np.max(scores, axis=1, keepdims=True)
  # print scores.shape
  pscores = np.exp(scores)
  pscores_norm = pscores/np.sum(pscores, axis=1, keepdims=True)
  loss = np.sum(-scores[xrange(num_train),y] + np.log(np.sum(pscores, axis=1)))

  pscores_norm[xrange(num_train),y] -= 1
  dW = X.T.dot(pscores_norm)

  loss /= num_train
  loss += 0.5*reg*np.sum(W*W)

  dW /= num_train
  dW += reg * W

  #############################################################################
  #                          END OF YOUR CODE                                 #
  #############################################################################

  return loss, dW
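
A side note on the indexing style above, not from the original project: NumPy accepts an xrange object as a fancy index by materializing it into an array, but np.arange is the more idiomatic row selector. A self-contained toy comparison:

import numpy as np
from past.builtins import xrange

scores = np.arange(12.0).reshape(4, 3)   # toy (num_train=4, classes=3)
y = np.array([0, 2, 1, 0])
print(scores[xrange(4), y])               # xrange works as a fancy index
print(scores[np.arange(4), y])            # idiomatic equivalent: [0. 5. 7. 9.]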
Project: recnet    Author: joergfranke    | Project source | File source
def make_duration_signal(rng, length):
    duration_signal = np.zeros(length)
    for i in xrange(0,length[0], 1):
        duration_signal[i] = rng.randint(1,9,1)
    return duration_signal


######              Create target signal
########################################
Project: recnet    Author: joergfranke    | Project source | File source
def make_target_signal(start_signal, duration_signal):
    target_signal = np.zeros([start_signal.shape[0], 2])
    counter = 0
    for i in xrange(target_signal.shape[0]):
        if start_signal[i] == 1:
            counter = duration_signal[i]
        if counter > 0:
            target_signal[i, 0] = 1
            counter -= 1
    target_signal[:,1] = 1 - target_signal[:,0]
    return target_signal


######                   Create data set
########################################
Project: recnet    Author: joergfranke    | Project source | File source
def make_data_set(rng, samples):
    input_data = []
    output_data = []
    for i in xrange(samples):
        length = rng.randint(100,200,1)
        start_signal = make_start_signal(rng, length)
        duration_signal = make_duration_signal(rng, length)
        target_signal = make_target_signal(start_signal, duration_signal)
        input_data.append(np.concatenate([start_signal.reshape([length[0],1]),duration_signal.reshape([length[0],1])],axis=1))
        output_data.append(target_signal)
    return input_data, output_data


######                Create klepto file
########################################
Project: recnet    Author: joergfranke    | Project source | File source
def rec_ortho(self, rng, ndim, ndim_factor):
        W = np.concatenate([self.sqr_ortho(rng, ndim) for i in xrange(ndim_factor)], axis=1)
        return W
Project: recnet    Author: joergfranke    | Project source | File source
def pass_structure_dict(self, prm_structure):

        if "net_size" in prm_structure:
            self.struct["net_size"      ] = prm_structure["net_size"]
            self.struct["hidden_layer"  ] = prm_structure["net_size"].__len__() - 2
        else:
            raise Warning("No net size")

        if "net_unit_type" in prm_structure:
            self.struct["net_unit_type"      ] = prm_structure["net_unit_type"]
            if  prm_structure["net_unit_type"].__len__() != self.struct["net_size" ].__len__():
                raise Warning("Net size and unit type have no equal length")
        else:
            raise Warning("No net unit type")

        if "net_act_type" in prm_structure:
            self.struct["net_act_type"      ] = prm_structure["net_act_type"]
            if  prm_structure["net_act_type"].__len__() != self.struct["net_size" ].__len__():
                raise Warning("Net size and act type have no equal length")
        else:
            self.struct["net_act_type" ] = ['tanh' for i in xrange(prm_structure["net_size"].__len__())]

        if "net_arch" in prm_structure:
            self.struct["net_arch"      ] = prm_structure["net_arch"]
            if  prm_structure["net_arch"].__len__() != self.struct["net_size" ].__len__():
                raise Warning("Net size and net architecture have no equal length")
        else:
            raise Warning("No network architecture 'net_arch' ")

        self.struct["weight_numb"] = 0


        if "identity_func" in prm_structure: #(currently corrupted)
            self.struct["identity_func"] = prm_structure["identity_func"]
        else:
            self.struct["identity_func"] = False


    ##### Passes parameters in optimize dictionary
    ########################################
Project: packaging    Author: blockstack    | Project source | File source
def test_baddecorator(self):
        data = 'The quick Brown fox Jumped over The lazy Dog'.split()
        self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)

# def _run_unittest(*args):
#     # with check_py3k_warnings(
#     #         (".+ not supported in 3.x", DeprecationWarning),
#     #         (".+ is renamed to imp.reload", DeprecationWarning),
#     #         ("classic int division", DeprecationWarning)):
#     if True:
#         run_unittest(*args)
# 
# def test_main(verbose=None):
#     test_classes = (BuiltinTest, TestSorted)
# 
#     _run_unittest(*test_classes)
# 
#     # verify reference counting
#     if verbose and hasattr(sys, "gettotalrefcount"):
#         import gc
#         counts = [None] * 5
#         for i in xrange(len(counts)):
#             _run_unittest(*test_classes)
#             gc.collect()
#             counts[i] = sys.gettotalrefcount()
#         print(counts)
Project: quantized-mesh-tile    Author: loicgasser    | Project source | File source
def getTrianglesCoordinates(self):
        """
        A method to retrieve triplet of coordinates representing the triangles
        in lon,lat,height.
        """
        triangles = []
        self._computeVerticesCoordinates()
        indices = iter(self.indices)
        for i in xrange(0, len(self.indices) - 1, 3):
            vi1 = next(indices)
            vi2 = next(indices)
            vi3 = next(indices)
            triangle = (
                (self._longs[vi1],
                 self._lats[vi1],
                 self._heights[vi1]),
                (self._longs[vi2],
                 self._lats[vi2],
                 self._heights[vi2]),
                (self._longs[vi3],
                 self._lats[vi3],
                 self._heights[vi3])
            )
            triangles.append(triangle)
        if len(list(indices)) > 0:
            raise Exception('Corrupted tile')
        return triangles
Project: quantized-mesh-tile    Author: loicgasser    | Project source | File source
def unpackIndices(f, indicesCount, indicesType):
    indices = []
    for i in xrange(0, indicesCount):
        indices.append(
            unpackEntry(f, indicesType)
        )
    return indices
Project: quantized-mesh-tile    Author: loicgasser    | Project source | File source
def createCoordsPairs(l):
    coordsPairs = []
    for i in xrange(0, len(l)):
        coordsPairs.append([l[i], l[(i + 2) % len(l)]])
    return coordsPairs
Project: backtrackbb    Author: BackTrackBB    | Project source | File source
def init_recursive_memory(config):
    n_bands = config.n_freq_bands
    nsamples = int(config.time_lag / config.delta)
    overlap = int(config.t_overlap / config.delta)
    # Create a dictionary of memory objects
    rec_memory = dict()
    for trid, wave in itertools.product(config.trids, config.wave_type):
        # Each entry of the dictionary is a list of memory objects
        # (with n_bands elements)
        rec_memory[(trid, wave)] =\
            [RecursiveMemory(trid=trid, wave=wave, band=n,
                             nsamples=nsamples, overlap=overlap,
                             filter_npoles=config.filter_npoles)
             for n in xrange(n_bands)]
    return rec_memory
Project: monkeys    Author: hchasestevens    | Project source | File source
def build_tree_to_requirements(scoring_function, build_tree=build_tree):
    params = getattr(scoring_function, '__params', ())
    if len(params) != 1:
        raise ValueError("Scoring function must accept a single parameter.")
    return_type, = params

    for __ in xrange(9999):
        with recursion_limit(500):
            tree = build_tree(return_type, convert=False)
        requirements = getattr(scoring_function, 'required_inputs', ())
        if not all(req in tree for req in requirements):
            continue
        return tree

    raise UnsatisfiableType("Could not meet input requirements.")
Project: monkeys    Author: hchasestevens    | Project source | File source
def next_generation(
        trees, scoring_fn,
        select_fn=DEFAULT_TOURNAMENT_SELECT,
        build_tree=build_tree_to_requirements, mutate=mutate,
        crossover_rate=0.80, mutation_rate=0.01,
        score_callback=None,
        optimizations=DEFAULT_OPTIMIZATIONS
    ):
    """
    Create next generation of trees from prior generation, maintaining current
    size.
    """
    selector = select_fn(trees, scoring_fn, score_callback=score_callback, optimizations=optimizations)
    pop_size = len(trees)

    new_pop = [max(trees, key=scoring_fn)]
    for __ in xrange(pop_size - 1):
        if random.random() <= crossover_rate:
            for __ in xrange(99999):
                try:
                    new_pop.append(crossover(next(selector), next(selector)))
                    break
                except (UnsatisfiableType, RuntimeError):
                    continue
            else:
                new_pop.append(build_tree(scoring_fn))

        elif random.random() <= mutation_rate / (1 - crossover_rate):
            new_pop.append(mutate(next(selector)))

        else:
            new_pop.append(next(selector))

    return new_pop
Project: monkeys    Author: hchasestevens    | Project source | File source
def show_report(self, top=3):
        for exception in self.exceptions:
            print('{}:'.format(exception))
            edge_weightings = iteritems(self.edge_weightings[exception])
            for __, (edge, weight) in zip(xrange(top), edge_weightings):
                print('    {:.2f} | {}'.format(weight, edge))
Project: go2mapillary    Author: enricofer    | Project source | File source
def _chunker(self, seq, size):
        return [seq[pos:pos + size] for pos in xrange(0, len(seq), size)]
Project: pyswarms    Author: ljvmiranda921    | Project source | File source
def generate_grid(self):
        """Generates the grid of hyperparameter value combinations."""

        options = dict(self.options)
        params = {}

        # Remove 'p' to hold as a constant in the parameter combinations
        p = options.pop('p')
        params['p'] = [p for _ in xrange(self.n_selection_iters)]

        # Assign generators based on parameter type
        param_generators = {
            'c1': np.random.uniform,
            'c2': np.random.uniform,
            'w': np.random.uniform,
            'k': np.random.randint
        }

        # Generate random values for hyperparameters 'c1', 'c2', 'w', and 'k'
        for idx, bounds in options.items():
            params[idx] = param_generators[idx](
                              *bounds, size=self.n_selection_iters)

        # Return list of dicts of hyperparameter combinations
        return [{'c1': params['c1'][i],
                 'c2': params['c2'][i],
                 'w': params['w'][i],
                 'k': params['k'][i],
                 'p': params['p'][i]}
                for i in xrange(self.n_selection_iters)]
Project: GooPyCharts    Author: Dfenestrator    | Project source | File source
def combineData(xdata,ydata,xlabel):
    #if ydata is a simple vector, encapsulate it into a 2D list
    if type(ydata[1]) is not list:
        ydata = [[val] for val in ydata]

    #if xdata is time data, add HH:MM:SS if it is missing (just 00:00:00)
    if type(xdata[1]) is str:
        #check if first 4 characters of xdata is a valid year
        if len(xdata[1]) == 10 and int(xdata[1][:4]) > 0 and int(xdata[1][:4]) < 3000:
            xdata[1:] = [val+' 00:00:00' for val in xdata[1:]]

    #figure out independent variable headers
    # if there is a title row, use that title
    if type(ydata[0][0]) is str:
        data = [[xdata[0]] + ydata[0]]
        for i in xrange(1,len(xdata)):
            data.append([xdata[i]]+ydata[i])
    # otherwise, use a default labeling
    else:
        header = [xlabel]
        for i in xrange(len(ydata[0])):
            header.append('data'+str(i+1))

        data = [header]
        for i in xrange(len(xdata)):
            data.append([xdata[i]]+ydata[i])

    return data

#helper function, returns title as a valid JS identifier, prefixed by '_'.
Project: ML_algorithm    Author: luoshao23    | Project source | File source
def svm_loss_naive(W, X, y, reg):
  """
  Structured SVM loss function, naive implementation (with loops).

  Inputs have dimension D, there are C classes, and we operate on minibatches
  of N examples.

  Inputs:
  - W: A numpy array of shape (D, C) containing weights.
  - X: A numpy array of shape (N, D) containing a minibatch of data.
  - y: A numpy array of shape (N,) containing training labels; y[i] = c means
    that X[i] has label c, where 0 <= c < C.
  - reg: (float) regularization strength

  Returns a tuple of:
  - loss as single float
  - gradient with respect to weights W; an array of same shape as W
  """
  dW = np.zeros(W.shape) # initialize the gradient as zero

  # compute the loss and the gradient
  num_classes = W.shape[1]
  num_train = X.shape[0]
  loss = 0.0
  for i in xrange(num_train):
    scores = X[i].dot(W)
    correct_class_score = scores[y[i]]
    for j in xrange(num_classes):
      if j == y[i]:
        continue
      margin = scores[j] - correct_class_score + 1 # note delta = 1
      if margin > 0:
        loss += margin
        dW[:,j] += X[i]
        dW[:,y[i]] -= X[i]

  # Right now the loss is a sum over all training examples, but we want it
  # to be an average instead so we divide by num_train.
  loss /= num_train
  dW /= num_train

  # Add regularization to the loss.
  loss += 0.5 * reg * np.sum(W * W)
  dW += reg * W
  #############################################################################
  # TODO:                                                                     #
  # Compute the gradient of the loss function and store it in dW.             #
  # Rather than first computing the loss and then computing the derivative,   #
  # it may be simpler to compute the derivative at the same time that the     #
  # loss is being computed. As a result you may need to modify some of the    #
  # code above to compute the gradient.                                       #
  #############################################################################


  return loss, dW
Project: ML_algorithm    Author: luoshao23    | Project source | File source
def svm_loss_vectorized(W, X, y, reg):
  """
  Structured SVM loss function, vectorized implementation.

  Inputs and outputs are the same as svm_loss_naive.
  """
  num_train = X.shape[0]
  loss = 0.0
  dW = np.zeros(W.shape) # initialize the gradient as zero

  #############################################################################
  # TODO:                                                                     #
  # Implement a vectorized version of the structured SVM loss, storing the    #
  # result in loss.                                                           #
  #############################################################################
  scores = X.dot(W)
  margin = np.maximum(0, scores + 1 - scores[xrange(num_train), y][:,np.newaxis])
  margin[xrange(num_train), y] = 0
  # hinge[hinge<0] = 0
  loss = np.sum(margin)
  loss /= num_train
  loss += 0.5*reg*np.sum(W*W)
  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################
  margin[margin>0] = 1.0
  margin[xrange(num_train), y] -= np.sum(margin, axis=1)
  dW = X.T.dot(margin)/num_train + reg*W

  #############################################################################
  # TODO:                                                                     #
  # Implement a vectorized version of the gradient for the structured SVM     #
  # loss, storing the result in dW.                                           #
  #                                                                           #
  # Hint: Instead of computing the gradient from scratch, it may be easier    #
  # to reuse some of the intermediate values that you used to compute the     #
  # loss.                                                                     #
  #############################################################################

  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################

  return loss, dW
Project: packaging    Author: blockstack    | Project source | File source
def test_zip(self):
        a = (1, 2, 3)
        b = (4, 5, 6)
        t = [(1, 4), (2, 5), (3, 6)]
        self.assertEqual(zip(a, b), t)
        b = [4, 5, 6]
        self.assertEqual(zip(a, b), t)
        b = (4, 5, 6, 7)
        self.assertEqual(zip(a, b), t)
        class I:
            def __getitem__(self, i):
                if i < 0 or i > 2: raise IndexError
                return i + 4
        self.assertEqual(zip(a, I()), t)
        self.assertEqual(zip(), [])
        self.assertEqual(zip(*[]), [])
        self.assertRaises(TypeError, zip, None)
        class G:
            pass
        self.assertRaises(TypeError, zip, a, G())

        # Make sure zip doesn't try to allocate a billion elements for the
        # result list when one of its arguments doesn't say how long it is.
        # A MemoryError is the most likely failure mode.
        class SequenceWithoutALength:
            def __getitem__(self, i):
                if i == 5:
                    raise IndexError
                else:
                    return i
        self.assertEqual(
            zip(SequenceWithoutALength(), xrange(2**30)),
            list(enumerate(range(5)))
        )

        class BadSeq:
            def __getitem__(self, i):
                if i == 5:
                    raise ValueError
                else:
                    return i
        self.assertRaises(ValueError, zip, BadSeq(), BadSeq())
Project: quantized-mesh-tile    Author: loicgasser    | Project source | File source
def computeNormals(vertices, faces):
    numVertices = len(vertices)
    numFaces = len(faces)
    normalsPerFace = [None] * numFaces
    areasPerFace = [0.0] * numFaces
    normalsPerVertex = np.zeros(vertices.shape, dtype=vertices.dtype)

    for i in xrange(0, numFaces):
        face = faces[i]
        v0 = vertices[face[0]]
        v1 = vertices[face[1]]
        v2 = vertices[face[2]]
        ctrd = centroid(v0, v1, v2)

        v1A = c3d.subtract(v1, v0)
        v2A = c3d.subtract(v2, v0)
        normalA = np.cross(v1A, v2A)
        viewPointA = c3d.add(ctrd, normalA)

        normalB = np.cross(v2A, v1A)
        viewPointB = c3d.add(ctrd, normalB)

        area = triangleArea(v0, v1)
        areasPerFace[i] = area
        squaredDistanceA = c3d.magnitudeSquared(viewPointA)
        squaredDistanceB = c3d.magnitudeSquared(viewPointB)

        # Always take the furthest point
        if squaredDistanceA > squaredDistanceB:
            normalsPerFace[i] = normalA
        else:
            normalsPerFace[i] = normalB

    for i in xrange(0, numFaces):
        face = faces[i]
        weightedNormal = [c * areasPerFace[i] for c in normalsPerFace[i]]
        for j in face:
            normalsPerVertex[j] = c3d.add(normalsPerVertex[j], weightedNormal)

    for i in xrange(0, numVertices):
        normalsPerVertex[i] = c3d.normalize(normalsPerVertex[i])

    return normalsPerVertex
Project: pyBinSim    Author: pyBinSim    | Project source | File source
def process(self, block):
        """
        Main function

        :param block:
        :return: (outputLeft, outputRight)
        """
        # print("Convolver: process")

        # First: Fill buffer and FDLs with current block
        if not self.processStereo:
            # print('Convolver Mono Processing')
            self.fill_buffer_mono(block)
        else:
            # print('Convolver Stereo Processing')
            self.fill_buffer_stereo(block)

        # Second: multiplication with the IR block and accumulation with previous data
        for irBlockCount in xrange(0, self.IR_blocks):
            # Always convolve the current filter
            self.multiply_and_add(irBlockCount)

            # Also convolve the old filter if interpolation is needed
            if self.interpolate:
                self.multiply_and_add_previous(irBlockCount)

        # Third: Transformation back to time domain
        if self.interpolate:
            # fade over full block size
            # print('do block interpolation')
            self.outputLeft = np.multiply(self.resultLeftPreviousIFFTPlan(self.resultLeftFreqPrevious).real[
                                          self.block_size:self.block_size * 2], self.crossFadeOut) + \
                              np.multiply(self.resultLeftIFFTPlan(self.resultLeftFreq).real[
                                          self.block_size:self.block_size * 2], self.crossFadeIn)

            self.outputRight = np.multiply(self.resultRightPreviousIFFTPlan(self.resultRightFreqPrevious).real[
                                           self.block_size:self.block_size * 2], self.crossFadeOut) + \
                               np.multiply(self.resultRightIFFTPlan(self.resultRightFreq).real[
                                           self.block_size:self.block_size * 2], self.crossFadeIn)

        else:
            self.outputLeft = self.resultLeftIFFTPlan(self.resultLeftFreq).real[self.block_size:self.block_size * 2]
            self.outputRight = self.resultRightIFFTPlan(self.resultRightFreq).real[self.block_size:self.block_size * 2]

        self.processCounter += 1
        self.interpolate = False

        return self.outputLeft, self.outputRight
Project: LTTL    Author: axanthos    | Project source | File source
def to_association_matrix(self, bias='none', progress_callback=None):
        """Return a table with Markov associativities between columns
        (cf. Bavaud & Xanthos 2005, Deneulin et al. 2014)
        """
        freq = self.to_numpy()
        total_freq = freq.sum()
        sum_col = freq.sum(axis=0)
        sum_row = freq.sum(axis=1)
        exchange = np.dot(
            np.transpose(freq),
            np.dot(
                np.diag(1 / sum_row),
                freq
            )
        ) / total_freq
        if bias == 'frequent':
            output_matrix = exchange
        elif bias == 'none':
            sqrt_pi_inv = np.diag(1 / np.sqrt(sum_col / total_freq))
            output_matrix = np.dot(sqrt_pi_inv, np.dot(exchange, sqrt_pi_inv))
        else:
            pi_inv = np.diag(1 / (sum_col / total_freq))
            output_matrix = np.dot(pi_inv, np.dot(exchange, pi_inv))
        col_ids = self.col_ids
        values = dict()
        for col_id_idx1 in xrange(len(col_ids)):
            col_id1 = col_ids[col_id_idx1]
            values.update(
                dict(
                    (
                        (col_id1, col_ids[i]),
                        output_matrix[col_id_idx1, i]
                    )
                    for i in xrange(len(col_ids))
                )
            )
            if progress_callback:
                progress_callback()
        new_header_row_id = (
            self.header_row_id[:-2]
            + "2"
            + self.header_row_id[-2:]
        )
        return (
            PivotCrosstab(
                self.col_ids[:],
                self.col_ids[:],
                values,
                new_header_row_id,
                self.header_row_type,
                self.header_row_id,
                self.header_row_type,
                col_type=self.col_type.copy(),
            )
        )
Project: LTTL    Author: axanthos    | Project source | File source
def to_flat(self, progress_callback=None):
        """Return a copy of the crosstab in 'flat' format"""
        new_header_col_id = '__id__'
        new_header_col_type = 'string'
        new_col_ids = [self.header_row_id or '__column__']
        num_row_ids = len(self.row_ids)
        if num_row_ids > 1:
            new_col_ids.append(self.header_col_id or '__row__')
            new_cached_row_id = None
            second_col_id = new_col_ids[1]
        else:
            new_cached_row_id = self.row_ids[0]
        new_col_type = dict([(col_id, 'discrete') for col_id in new_col_ids])
        row_counter = 1
        new_values = dict()
        new_row_ids = list()
        get_count = self.values.get
        first_col_id = new_col_ids[0]
        for row_id in self.row_ids:
            for col_id in self.col_ids:
                count = get_count((row_id, col_id), 0)
                for i in xrange(count):
                    new_row_id = text(row_counter)
                    new_row_ids.append(new_row_id)
                    new_values[(new_row_id, first_col_id)] = col_id
                    if num_row_ids > 1:
                        new_values[(new_row_id, second_col_id)] = row_id
                    row_counter += 1
            if progress_callback:
                progress_callback()
        return (
            FlatCrosstab(
                new_row_ids,
                new_col_ids,
                new_values,
                header_col_id=new_header_col_id,
                header_col_type=new_header_col_type,
                col_type=new_col_type,
                class_col_id=None,
                missing=self.missing,
                _cached_row_id=new_cached_row_id,
            )
        )
Project: LTTL    Author: axanthos    | Project source | File source
def to_flat(self, progress_callback=None):
        """Return a copy of the crosstab in 'flat' format"""
        new_col_ids = list([c for c in self.col_ids if c != '__weight__'])
        new_col_type = dict(self.col_type)
        del new_col_type['__weight__']
        row_counter = 1
        new_values = dict()
        new_row_ids = list()
        if len(self.col_ids) > 1:
            first_col_id = self.col_ids[0]
            second_col_id = self.col_ids[1]
            for row_id in self.row_ids:
                count = self.values[(row_id, '__weight__')]
                first_col_value = self.values[row_id, first_col_id]
                second_col_value = self.values[row_id, second_col_id]
                for i in xrange(count):
                    new_row_id = text(row_counter)
                    new_row_ids.append(new_row_id)
                    new_values[(new_row_id, first_col_id)] = first_col_value
                    new_values[(new_row_id, second_col_id)] = second_col_value
                    row_counter += 1
                if progress_callback:
                    progress_callback()
        else:
            col_id = self.col_ids[0]
            for row_id in self.row_ids:
                count = self.values[(row_id, '__weight__')]
                col_value = self.values[row_id, col_id]
                for i in xrange(count):
                    new_row_id = text(row_counter)
                    new_row_ids.append(new_row_id)
                    new_values[(new_row_id, col_id)] = col_value
                    row_counter += 1
                if progress_callback:
                    progress_callback()
        return (
            FlatCrosstab(
                new_row_ids,
                new_col_ids,
                new_values,
                self.header_row_id,
                self.header_row_type,
                self.header_col_id,
                self.header_col_type,
                new_col_type,
                None,
                self.missing,
                self._cached_row_id,
            )
        )