Python numpy module: unravel_index() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.unravel_index().
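Before the project snippets, here is a minimal standalone sketch of what numpy.unravel_index() does: it converts flat (raveled) indices into coordinate tuples for a given array shape, and is the inverse of numpy.ravel_multi_index().

import numpy as np

a = np.arange(12).reshape(3, 4)

# Flat index of the maximum -> (row, col) coordinates.
flat = a.argmax()                                  # 11
row, col = np.unravel_index(flat, a.shape)         # (2, 3)

# Also works on arrays of flat indices, and round-trips with ravel_multi_index.
coords = np.unravel_index([0, 5, 11], a.shape)     # (array([0, 1, 2]), array([0, 1, 3]))
flat_back = np.ravel_multi_index(coords, a.shape)  # array([ 0,  5, 11])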

Project: generals_a3c    Author: yilundu    | Project source | File source
def _parse_action(self, action):
        move_type, y, x = np.unravel_index(action, (8, self.map_height, self.map_width))
        start = y * self.map_width + x
        index = move_type % 4

        if index == 0:
            end = start + self.map_width
        elif index == 1:
            end = start + 1
        elif index == 2:
            end = start - self.map_width
        elif index == 3:
            end = start - 1
        else:
            raise("invalid index")

        is_50 = True if move_type >= 4 else False

        return {'start': start, 'end': end, 'is50': is_50}
Project: generals_a3c    Author: yilundu    | Project source | File source
def is_valid_move(self, start, end, player_index):
        start_label = self.label_map.flat[start]

        if end < len(self.label_map.flat) and end >= 0:
            end_label = self.label_map.flat[end]
        else:
            return False

        index = start_label - 1

        if player_index is not None and (player_index != index):
            return False

        if self.army_map.flat[start] == 0:
            return False

        start_x, start_y = np.unravel_index(start, (self.map_height, self.map_width))
        end_x, end_y = np.unravel_index(end, (self.map_height, self.map_width))

        if abs(start_x - end_x) + abs(start_y - end_y) != 1:
            return False

        return True
Project: mpnum    Author: dseuss    | Project source | File source
def _sample_cond_single(rng, marginal_pmf, n_group, out, eps):
        """Single sample from conditional probab. (call :func:`self.sample`)"""
        n_sites = len(marginal_pmf[-1])
        # Probability of the incomplete output. Empty output has unit probab.
        out_p = 1.0
        # `n_out` sites of the output have been sampled. We will add
        # at most `n_group` sites to the output at a time.
        for n_out in range(0, n_sites, n_group):
            # Select marginal probability distribution on (at most)
            # `n_out + n_group` sites.
            p = marginal_pmf[min(n_sites, n_out + n_group)]
            # Obtain conditional probab. from joint `p` and marginal `out_p`
            p = p.get(tuple(out[:n_out]) + (slice(None),) * (len(p) - n_out))
            p = project_pmf(mp.prune(p).to_array() / out_p, eps, eps)
            # Sample from conditional probab. for next `n_group` sites
            choice = rng.choice(p.size, p=p.flat)
            out[n_out:n_out + n_group] = np.unravel_index(choice, p.shape)
            # Update probability of the partial output
            out_p *= np.prod(p.flat[choice])
        # Verify we have the correct partial output probability
        p = marginal_pmf[-1].get(tuple(out)).to_array()
        assert abs(p - out_p) <= eps
Project: mpnum    Author: dseuss    | Project source | File source
def unpack_samples(self, samples):
        """Unpack samples into several integers per sample

        Inverse of :func:`MPPovm.pack_samples`. Example:

        >>> p = pauli_mpp(nr_sites=2, local_dim=2)
        >>> p.outdims
        (6, 6)
        >>> p.unpack_samples(np.array([0, 6, 7, 12]))
        array([[0, 0],
               [1, 0],
               [1, 1],
               [2, 0]], dtype=uint8)

        """
        assert samples.ndim == 1
        assert all(dim <= 255 for dim in self.outdims)
        return np.array(np.unravel_index(samples, self.nsoutdims)) \
                 .T.astype(np.uint8)
Project: brats17    Author: xf4j    | Project source | File source
def batch_works(k):
    if k == n_processes - 1:
        paths = all_paths[k * int(len(all_paths) / n_processes) : ]
    else:
        paths = all_paths[k * int(len(all_paths) / n_processes) : (k + 1) * int(len(all_paths) / n_processes)]

    for path in paths:
        o_path = os.path.join(output_path, os.path.basename(path))
        if not os.path.exists(o_path):
            os.makedirs(o_path)
        x, y, z = perturb_patch_locations(base_locs, patch_size / 16)
        probs = generate_patch_probs(path, (x, y, z), patch_size, image_size)
        selections = np.random.choice(range(len(probs)), size=patches_per_image, replace=False, p=probs)
        image = read_image(path)
        for num, sel in enumerate(selections):
            i, j, k = np.unravel_index(sel, (len(x), len(y), len(z)))
            patch = image[int(x[i] - patch_size / 2) : int(x[i] + patch_size / 2),
                          int(y[j] - patch_size / 2) : int(y[j] + patch_size / 2),
                          int(z[k] - patch_size / 2) : int(z[k] + patch_size / 2), :]
            f = os.path.join(o_path, str(num))
            np.save(f, patch)
Project: Learning-to-navigate-without-a-map    Author: ToniRV    | Project source | File source
def __process_path__(self, path, next_move_only=True):
        if len(path) != 0:
            path = path[:-1]
            print("[INFO] Received path: %s" % (path))
            path_list = []
            for a in path.split('.'):
                path_list.append(int(a))
            path_list = np.unravel_index(path_list, self.imsize)
            solution_list = []
            for i in xrange(path_list[0].shape[0]):
                solution_list.append((path_list[0][i],
                                      path_list[1][i]))
            if next_move_only:
                return False, solution_list[1]
            else:
                return False, solution_list
        else:
            print("[ERROR] Errors found while running dstar algorithm.")
            return True
Project: rl_algorithms    Author: DanielTakeshi    | Project source | File source
def __init__(self):
        self.shape = (4, 12)

        nS = np.prod(self.shape)
        nA = 4

        # Cliff Location
        self._cliff = np.zeros(self.shape, dtype=np.bool)
        self._cliff[3, 1:-1] = True

        # Calculate transition probabilities
        P = {}
        for s in range(nS):
            position = np.unravel_index(s, self.shape)
            P[s] = { a : [] for a in range(nA) }
            P[s][UP] = self._calculate_transition_prob(position, [-1, 0])
            P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1])
            P[s][DOWN] = self._calculate_transition_prob(position, [1, 0])
            P[s][LEFT] = self._calculate_transition_prob(position, [0, -1])

        # We always start in state (3, 0)
        isd = np.zeros(nS)
        isd[np.ravel_multi_index((3,0), self.shape)] = 1.0

        super(CliffWalkingEnv, self).__init__(nS, nA, P, isd)
Project: rl_algorithms    Author: DanielTakeshi    | Project source | File source
def _render(self, mode='human', close=False):
        if close:
            return

        outfile = StringIO() if mode == 'ansi' else sys.stdout

        for s in range(self.nS):
            position = np.unravel_index(s, self.shape)
            # print(self.s)
            if self.s == s:
                output = " x "
            elif position == (3,7):
                output = " T "
            else:
                output = " o "

            if position[1] == 0:
                output = output.lstrip()
            if position[1] == self.shape[1] - 1:
                output = output.rstrip()
                output += "\n"

            outfile.write(output)
        outfile.write("\n")
Project: hourglasstensorlfow    Author: wbenbihi    | Project source | File source
def joints_pred_numpy(self, img, coord = 'hm', thresh = 0.2, sess = None):
        """ Create Tensor for joint position prediction
        NON TRAINABLE
        TO CALL AFTER GENERATING GRAPH
        Notes:
            Not more efficient than Numpy, prefer Numpy for such operation!
        """
        if sess is None:
            hm = self.HG.Session.run(self.HG.pred_sigmoid , feed_dict = {self.HG.img: img})
        else:
            hm = sess.run(self.HG.pred_sigmoid , feed_dict = {self.HG.img: img})
        joints = -1*np.ones(shape = (self.params['num_joints'], 2))
        for i in range(self.params['num_joints']):
            index = np.unravel_index(hm[0,:,:,i].argmax(), (self.params['hm_size'],self.params['hm_size']))
            if hm[0,index[0], index[1],i] > thresh:
                if coord == 'hm':
                    joints[i] = np.array(index)
                elif coord == 'img':
                    joints[i] = np.array(index) * self.params['img_size'] / self.params['hm_size']
        return joints
Project: wxgen    Author: metno    | Project source | File source
def get_i_j(lats, lons, lat, lon):
   """
   Finds the nearest neighbour in a lat lon grid. If the point is outside the grid, the nearest
   point within the grid is still returned.

   Arguments:
      lats (np.array): 2D array of latitudes
      lons (np.array): 2D array of longitude
      lat (float): Lookup latitude
      lon (float): Lookup longitude

   Returns:
      I (int): First index into lats/lons arrays
      J (int): Second index into lats/lons arrays
   """
   dist = distance(lat, lon, lats, lons)
   indices = np.unravel_index(dist.argmin(), dist.shape)
   X = lats.shape[0]
   Y = lats.shape[1]
   I = indices[0]
   J = indices[1]
   if(indices[0] == 0 or indices[0] >= X-1 or indices[1] == 0 or indices[1] >= Y-1):
      debug("Lat/lon %g,%g outside grid" % (lat, lon))
   return I, J
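The argmin-then-unravel_index pattern above recurs throughout these examples; below is a minimal self-contained sketch of the same idea, using a plain squared-degree distance in place of wxgen's distance() helper (the grid values are made up for illustration).

import numpy as np

def nearest_grid_point(lats, lons, lat, lon):
    """Return (I, J) of the grid cell closest to (lat, lon).

    A simplified stand-in for get_i_j above: squared differences in degrees
    instead of wxgen's great-circle distance().
    """
    dist = (lats - lat) ** 2 + (lons - lon) ** 2
    return np.unravel_index(dist.argmin(), dist.shape)

lats, lons = np.meshgrid(np.linspace(50, 60, 11), np.linspace(0, 10, 11), indexing='ij')
print(nearest_grid_point(lats, lons, 53.2, 4.9))   # (3, 5)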
Project: pyxem    Author: pyxem    | Project source | File source
def find_beam_position_blur(z, sigma=30):
    """Estimate direct beam position by blurring the image with a large
    Gaussian kernel and finding the maximum.

    Parameters
    ----------
    z : np.array
        Image in which to estimate the direct beam position.
    sigma : float
        Sigma value for Gaussian blurring kernel.

    Returns
    -------
    center : np.array
        np.array containing indices of estimated direct beam position.
    """
    blurred = ndi.gaussian_filter(z, sigma)
    center = np.unravel_index(blurred.argmax(), blurred.shape)

    return np.array(center)
Project: sockeye    Author: awslabs    | Project source | File source
def smallest_k(matrix: np.ndarray, k: int,
               only_first_row: bool = False) -> Tuple[Tuple[np.ndarray, np.ndarray], np.ndarray]:
    """
    Find the smallest elements in a numpy matrix.

    :param matrix: Any matrix.
    :param k: The number of smallest elements to return.
    :param only_first_row: If true the search is constrained to the first row of the matrix.
    :return: The row indices, column indices and values of the k smallest items in matrix.
    """
    if only_first_row:
        flatten = matrix[:1, :].flatten()
    else:
        flatten = matrix.flatten()

    # args are the indices in flatten of the k smallest elements
    args = np.argpartition(flatten, k)[:k]
    # args are the indices in flatten of the sorted k smallest elements
    args = args[np.argsort(flatten[args])]
    # flatten[args] are the values for args
    return np.unravel_index(args, matrix.shape), flatten[args]
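The argpartition-plus-unravel_index pattern in smallest_k can be exercised on its own; the helper below is a hedged standalone sketch of the same idea, not part of sockeye's API.

import numpy as np

def k_smallest_coords(matrix, k):
    """Row/column indices and values of the k smallest entries, ascending."""
    flat = matrix.ravel()
    args = np.argpartition(flat, k)[:k]      # k smallest, in arbitrary order
    args = args[np.argsort(flat[args])]      # sort those k by value
    return np.unravel_index(args, matrix.shape), flat[args]

m = np.array([[9., 2., 7.],
              [4., 1., 8.]])
(rows, cols), vals = k_smallest_coords(m, 3)
# rows -> [1 0 1], cols -> [1 1 0], vals -> [1. 2. 4.]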
Project: sockeye    Author: awslabs    | Project source | File source
def smallest_k_mx(matrix: mx.nd.NDArray, k: int,
                  only_first_row: bool = False) -> Tuple[Tuple[np.ndarray, np.ndarray], np.ndarray]:
    """
    Find the smallest elements in a NDarray.

    :param matrix: Any matrix.
    :param k: The number of smallest elements to return.
    :param only_first_row: If True the search is constrained to the first row of the matrix.
    :return: The row indices, column indices and values of the k smallest items in matrix.
    """
    if only_first_row:
        matrix = mx.nd.reshape(matrix[0], shape=(1, -1))

    # pylint: disable=unbalanced-tuple-unpacking
    values, indices = mx.nd.topk(matrix, axis=None, k=k, ret_typ='both', is_ascend=True)

    return np.unravel_index(indices.astype(np.int32).asnumpy(), matrix.shape), values
Project: yt    Author: yt-project    | Project source | File source
def partition_index_2d(self, axis):
        if not self._distributed:
           return False, self.index.grid_collection(self.center,
                                                        self.index.grids)

        xax = self.ds.coordinates.x_axis[axis]
        yax = self.ds.coordinates.y_axis[axis]
        cc = MPI.Compute_dims(self.comm.size, 2)
        mi = self.comm.rank
        cx, cy = np.unravel_index(mi, cc)
        x = np.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
        y = np.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]

        DLE, DRE = self.ds.domain_left_edge.copy(), self.ds.domain_right_edge.copy()
        LE = np.ones(3, dtype='float64') * DLE
        RE = np.ones(3, dtype='float64') * DRE
        LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
        RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
        LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
        RE[yax] = y[1] * (DRE[yax]-DLE[yax]) + DLE[yax]
        mylog.debug("Dimensions: %s %s", LE, RE)

        reg = self.ds.region(self.center, LE, RE)
        return True, reg
Project: Splipy    Author: sintefmath    | Project source | File source
def _unravel_flat_index(self, i):
        """Unravels a flat index i to multi-indexes.

        :param i: Flat index
        :type i: int or slice
        :rtype: Tuple of np.array
        :raises IndexError: If the index is out of bounds
        """
        # i is int => make sure we deal with negative i properly
        # i is slice => use i.indices to compute the actual indices
        total = len(self)
        if isinstance(i, int):
            indexes = [i] if i >= 0 else [total + i]
        else:
            indexes = list(range(*i.indices(total)))

        # Convert to multi-indexes
        try:
            unraveled = np.unravel_index(indexes, self.controlpoints.shape[:-1], order='F')
        except ValueError:
            raise IndexError

        return unraveled
Project: imgProcessor    Author: radjkarl    | Project source | File source
def estimateBackgroundLevel(img, image_is_artefact_free=False, 
                            min_rel_size=0.05, max_abs_size=11):
    '''
    estimate background level through finding the most homogeneous area
    and take its average

    min_rel_size - relative size of the examined area
    '''

    s0,s1 = img.shape[:2]
    s = min(max_abs_size, int(max(s0,s1)*min_rel_size))
    arr = np.zeros(shape=(s0-2*s, s1-2*s), dtype=img.dtype)

    #fill arr:
    _spatialStd(img, arr, s)
    #most homogeneous area:
    i,j = np.unravel_index(arr.argmin(), arr.shape)
    sub = img[int(i+0.5*s):int(i+s*1.5), 
              int(j+s*0.5):int(j+s*1.5)]

    return np.median(sub)
Project: interactive_mpl_tutorial    Author: tacaswell    | Project source | File source
def _pixel_select(self, event):

        x, y = event.xdata, event.ydata
        # get index by assuming even spacing
        # TODO use kdtree?
        diff = np.hypot((self.x_pos - x), (self.y_pos - y))
        y_ind, x_ind = np.unravel_index(np.argmin(diff), diff.shape)

        # get the spectrum for this point
        new_y_data = self.counts[y_ind, x_ind, :]
        self.mask = np.zeros(self.x_pos.shape, dtype='bool')
        self.mask[y_ind, x_ind] = True
        self.mask_im.set_data(self._overlay_image)
        self._pixel_txt.set_text(
            'pixel: [{:d}, {:d}] ({:.3g}, {:.3g})'.format(
                y_ind, x_ind,
                self.x_pos[y_ind, x_ind],
                self.y_pos[y_ind, x_ind]))

        self.spec.set_ydata(new_y_data)
        self.ax_spec.relim()
        self.ax_spec.autoscale(True, axis='y')
        self.fig.canvas.draw_idle()
Project: Halite    Author: shummie    | Project source | File source
def all_out_attack(self):
        cells_to_consider_moving = []
        for square in itertools.chain.from_iterable(self.squares):
            # Do we risk undoing a multi-move capture if we move a piece that's "STILL"?
            if square.owner == self.my_id and (square.move == -1 or square.move == STILL) and square.strength > (square.production * late_game_buildup_multiplier):
                cells_to_consider_moving.append(square)

        for square in cells_to_consider_moving:
#            # Find an enemy square to attack!
#            #if (square.x + square.y) % 2 == self.frame % 2:
#            value_map = numpy.zeros((self.width, self.height))
#            #value_map += numpy.multiply(numpy.divide(self.influence_enemy_strength_map[3], self.dij_prod_distance_map[square.x, square.y]), self.combat_zone_map)
#            value_map += numpy.multiply(self.distance_map_no_decay[square.x, square.y], self.is_enemy_map)
#            tx, ty = numpy.unravel_index(value_map.argmax(), (self.width, self.height))
#            target = self.squares[tx, ty]
#            square.move_to_target(target, False)
            self.find_nearest_non_npc_enemy_direction(square)
Project: Halite    Author: shummie    | Project source | File source
def update_value_maps(self):
        self.base_value_map = np.divide(self.production_map_01, self.strength_map_1) * (self.is_neutral_map - self.combat_zone_map)
        # Each neutral cell gets assigned to the closest border non-combat cell
        global_targets_indices = np.transpose(np.nonzero(self.is_neutral_map - self.combat_zone_map))
        global_targets = [self.squares[c[0], c[1]] for c in global_targets_indices]
        self.global_border_map = np.zeros((self.w, self.h))

        for g in global_targets:
            # Find the closest border square that routes to g
            gb_map = self.dij_recov_distance_map[g.x, g.y] * (self.border_map - self.combat_zone_map)
            gb_map[gb_map == 0] = 9999
            tx, ty = np.unravel_index(gb_map.argmin(), (self.w, self.h))
            self.global_border_map[tx, ty] += self.base_value_map[g.x, g.y] / self.dij_recov_distance_map[g.x, g.y, tx, ty]

        self.value_map = 1 / np.maximum(self.base_value_map + self.global_border_map * 1, 0.001)
        print_map(self.global_border_map, "global_border_")
        print_map(self.base_value_map, "base_value_")
        print_map(self.value_map, "value_map_")
Project: Halite    Author: shummie    | Project source | File source
def update_value_maps(self):
        base_value_map = np.divide(self.production_map_01, self.strength_map_1) * (self.is_neutral_map - self.combat_zone_map)
        # Each neutral cell gets assigned to the closest border non-combat cell
        global_targets_indices = np.transpose(np.nonzero(self.is_neutral_map - self.combat_zone_map))
        global_targets = [self.squares[c[0], c[1]] for c in global_targets_indices]
        # border_squares_indices = np.transpose(np.nonzero(self.border_map - self.combat_zone_map))
        # border_squares = [self.squares[c[0], c[1]] for c in border_squares_indices]
        global_border_map = np.zeros((self.w, self.h))

        for g in global_targets:
            # Find the closest border square that routes to g
            gb_map = self.dij_recov_distance_map[g.x, g.y] * (self.border_map - self.combat_zone_map)
            gb_map[gb_map == 0] = 9999
            tx, ty = np.unravel_index(gb_map.argmin(), (self.w, self.h))
            global_border_map[tx, ty] += base_value_map[g.x, g.y] / self.dij_recov_distance_map[g.x, g.y, tx, ty]

        self.value_map = 1 / np.maximum(base_value_map + global_border_map * 1, 0.001)
        print_map(global_border_map, "global_border_")
        print_map(base_value_map, "base_value_")
        print_map(self.value_map, "value_map_")
Project: Halite    Author: shummie    | Project source | File source
def update_value_production_map(self):
        self.value_production_map = (self.border_map - self.combat_zone_map * (self.enemy_strength_map[1] == 0)) * self.recover_wtd_map
        #self.value_production_map = (self.border_map - self.combat_zone_map) * self.recover_wtd_map
        self.value_production_map[self.value_production_map == 0] = 9999
        turns_left = self.max_turns - self.frame
        recover_threshold = turns_left * 0.6
        self.value_production_map[self.value_production_map > recover_threshold] = 9999
        bx, by = np.unravel_index(self.value_production_map.argmin(), (self.width, self.height))
        best_cell_value = self.value_production_map[bx, by]

        avg_recov_threshold = 2
        avg_map_recovery = np.sum(self.strength_map * self.border_map) / np.sum(self.production_map * self.border_map)
        self.value_production_map[self.value_production_map > (avg_recov_threshold * avg_map_recovery)] = 9999

        if self.frame > 5 and self.my_production_sum / self.next_highest_production_sum > 1.1 and np.sum(self.combat_zone_map) > 2:
            self.value_production_map = np.ones((self.width, self.height)) * 9999
Project: Halite    Author: shummie    | Project source | File source
def update_value_maps(self):
        self.base_value_map = np.divide(self.strength_map, self.production_map_01) * (self.is_neutral_map - self.combat_zone_map)
        self.value_map = np.zeros((self.width, self.height))
        cells_out = 5
        num_cells = cells_out * (cells_out + 1) * 2 + 1
        for x in range(self.width):
            for y in range(self.height):
                if self.is_neutral_map[x, y]:
                    self.value_map += (self.distance_map_no_decay[x, y] + self.base_value_map[x, y]) * (self.distance_map_no_decay[x, y] <= cells_out)
                else:
                    self.value_map += (self.distance_map_no_decay[x, y] + 100) * (self.distance_map_no_decay[x, y] <= cells_out)
                # Add in the cost to get to each square.

        self.global_search_map = np.copy(self.value_map)

        self.value_map /= num_cells

        for x in range(self.width):
            for y in range(self.height):
                temp_map = self.dij_recov_distance_map[x, y] * (self.is_owned_map == 1)
                temp_map[temp_map == 0] = 9999
                tx, ty = np.unravel_index(temp_map.argmin(), (self.width, self.height))
                self.global_search_map[x, y] += self.dij_recov_distance_map[x, y, tx, ty]
        print_map(self.value_map, "value_map_")
        print_map(self.global_search_map, "global_search_map_")
Project: Halite    Author: shummie    | Project source | File source
def update_value_maps(self):
        self.base_value_map = np.divide(self.production_map_01, self.strength_map_1) * (self.is_neutral_map - self.combat_zone_map)
        # Each neutral cell gets assigned to the closest border non-combat cell
        global_targets_indices = np.transpose(np.nonzero(self.is_neutral_map - self.combat_zone_map))
        global_targets = [self.squares[c[0], c[1]] for c in global_targets_indices]
        # border_squares_indices = np.transpose(np.nonzero(self.border_map - self.combat_zone_map))
        # border_squares = [self.squares[c[0], c[1]] for c in border_squares_indices]
        self.global_border_map = np.zeros((self.w, self.h))

        for g in global_targets:
            # Find the closest border square that routes to g
            gb_map = self.dij_recov_distance_map[g.x, g.y] * (self.border_map - self.combat_zone_map)
            gb_map[gb_map == 0] = 9999
            tx, ty = np.unravel_index(gb_map.argmin(), (self.w, self.h))
            self.global_border_map[tx, ty] += self.base_value_map[g.x, g.y] / self.dij_recov_distance_map[g.x, g.y, tx, ty]

        self.value_map = 1 / np.maximum(self.base_value_map + self.global_border_map * 1, 0.001)
        print_map(self.global_border_map, "global_border_")
        print_map(self.base_value_map, "base_value_")
        print_map(self.value_map, "value_map_")
Project: Halite    Author: shummie    | Project source | File source
def update_value_maps(self):
        base_value_map = np.divide(self.production_map_01, self.strength_map_1) * (self.is_neutral_map - self.combat_zone_map)
        # Each neutral cell gets assigned to the closest border non-combat cell
        global_targets_indices = np.transpose(np.nonzero(self.is_neutral_map - self.combat_zone_map))
        global_targets = [self.squares[c[0], c[1]] for c in global_targets_indices]
        # border_squares_indices = np.transpose(np.nonzero(self.border_map - self.combat_zone_map))
        # border_squares = [self.squares[c[0], c[1]] for c in border_squares_indices]
        global_border_map = np.zeros((self.w, self.h))

        for g in global_targets:
            # Find the closest border square that routes to g
            gb_map = self.dij_recov_distance_map[g.x, g.y] * (self.border_map - self.combat_zone_map)
            gb_map[gb_map == 0] = 9999
            tx, ty = np.unravel_index(gb_map.argmin(), (self.w, self.h))
            global_border_map[tx, ty] += base_value_map[g.x, g.y] / self.dij_recov_distance_map[g.x, g.y, tx, ty]

        self.value_map = 1 / np.maximum(base_value_map + global_border_map * 1, 0.001)
        print_map(global_border_map, "global_border_")
        print_map(base_value_map, "base_value_")
        print_map(self.value_map, "value_map_")
Project: Halite    Author: shummie    | Project source | File source
def update_value_maps(self):
        self.base_value_map = np.divide(self.production_map_01, self.strength_map_1) * (self.is_neutral_map - self.combat_zone_map)
        # Each neutral cell gets assigned to the closest border non-combat cell
        global_targets_indices = np.transpose(np.nonzero(self.is_neutral_map - self.combat_zone_map))
        global_targets = [self.squares[c[0], c[1]] for c in global_targets_indices]
        self.global_border_map = np.zeros((self.w, self.h))

        for g in global_targets:
            # Find the closest border square that routes to g
            gb_map = self.dij_recov_distance_map[g.x, g.y] * (self.border_map - self.combat_zone_map)
            gb_map[gb_map == 0] = 9999
            tx, ty = np.unravel_index(gb_map.argmin(), (self.w, self.h))
            self.global_border_map[tx, ty] += self.base_value_map[g.x, g.y] / self.dij_recov_distance_map[g.x, g.y, tx, ty]

        self.value_map = 1 / np.maximum(self.base_value_map + self.global_border_map * 1, 0.001)
        print_map(self.global_border_map, "global_border_")
        print_map(self.base_value_map, "base_value_")
        print_map(self.value_map, "value_map_")
Project: dl4mt-multi    Author: nyu-dl    | Project source | File source
def process_batch(self, batch):
        """
        Execution of an update step, infer cg_id from selectors, and pick
        corresponding computational graph, and apply batch to the CG.
        """
        cg_id = self.get_cg_id_from_selectors(batch['src_selector'][0],
                                              batch['trg_selector'][0])

        # Apply input replacement with <UNK> if necessary
        if self.drop_input[cg_id] > 0.0:
            num_els = numpy.prod(batch['source'].shape)
            num_reps = max(1, int(num_els * self.drop_input[cg_id]))
            replace_idx = numpy.random.choice(num_els, num_reps, replace=False)
            # TODO: set it according to unk_id in config
            batch['source'][numpy.unravel_index(
                replace_idx, batch['source'].shape)] = 1

        ordered_batch = [batch[v.name] for v in self.algorithms[cg_id].inputs]

        # To save memory, we may combine f_update and f_grad_shared
        if self.f_grad_shareds[cg_id] is None:
            inps = [self.learning_rate] + ordered_batch
            cost = self.f_updates[cg_id](*inps)
            self._cost = ('cost_' + cg_id, cost)
        else:
            cost = self.f_grad_shareds[cg_id](*ordered_batch)
            self._cost = ('cost_' + cg_id, cost)
            self.f_updates[cg_id](self.learning_rate)
Project: convolutional-pose-machines-tensorflow    Author: timctho    | Project source | File source
def make_gaussian_batch(heatmaps, size, fwhm):
    """ Make a square gaussian kernel.
    size is the length of a side of the square
    fwhm is full-width-half-maximum, which
    can be thought of as an effective radius.
    """
    stride = heatmaps.shape[1] // size

    batch_datum = np.zeros(shape=(heatmaps.shape[0], size, size, heatmaps.shape[3]))

    for data_num in range(heatmaps.shape[0]):
        for joint_num in range(heatmaps.shape[3] - 1):
            heatmap = heatmaps[data_num, :, :, joint_num]
            center = np.unravel_index(np.argmax(heatmap), (heatmap.shape[0], heatmap.shape[1]))

            x = np.arange(0, size, 1, float)
            y = x[:, np.newaxis]

            if center is None:
                x0 = y0 = size * stride // 2
            else:
                x0 = center[1]
                y0 = center[0]

            batch_datum[data_num, :, :, joint_num] = np.exp(
                -((x * stride - x0) ** 2 + (y * stride - y0) ** 2) / 2.0 / fwhm / fwhm)
        batch_datum[data_num, :, :, heatmaps.shape[3] - 1] = np.ones((size, size)) - np.amax(
            batch_datum[data_num, :, :, 0:heatmaps.shape[3] - 1], axis=2)

    return batch_datum
Project: NumpyDL    Author: oujago    | Project source | File source
def backward(self, pre_grad, *args, **kwargs):
        new_h, new_w = self.out_shape[-2:]
        pool_h, pool_w = self.pool_size

        layer_grads = _zero(self.input_shape)

        if np.ndim(pre_grad) == 4:
            nb_batch, nb_axis, _, _ = pre_grad.shape

            for a in np.arange(nb_batch):
                for b in np.arange(nb_axis):
                    for h in np.arange(new_h):
                        for w in np.arange(new_w):
                            patch = self.last_input[a, b, h:h + pool_h, w:w + pool_w]
                            max_idx = np.unravel_index(patch.argmax(), patch.shape)
                            h_shift, w_shift = h * pool_h + max_idx[0], w * pool_w + max_idx[1]
                            layer_grads[a, b, h_shift, w_shift] = pre_grad[a, b, h, w]

        elif np.ndim(pre_grad) == 3:
            nb_batch, _, _ = pre_grad.shape

            for a in np.arange(nb_batch):
                for h in np.arange(new_h):
                    for w in np.arange(new_w):
                        patch = self.last_input[a, h:h + pool_h, w:w + pool_w]
                        max_idx = np.unravel_index(patch.argmax(), patch.shape)
                        h_shift, w_shift = h * pool_h + max_idx[0], w * pool_w + max_idx[1]
                        layer_grads[a, h_shift, w_shift] = pre_grad[a, h, w]

        else:
            raise ValueError()

        return layer_grads
Project: hh0    Author: sfeeney    | Project source | File source
def riess_reject(n_c_ch, app_mag_err_c, sig_int_c, res, threshold = 2.7):

    res_scaled = np.zeros(res.shape)
    for i in range(0, len(n_c_ch)):
        res_scaled[i, 0: n_c_ch[i]] = np.abs(res[i, 0: n_c_ch[i]]) / \
                                      np.sqrt(app_mag_err_c[i, 0: n_c_ch[i]] ** 2 + 
                                              sig_int_c ** 2)
    to_rej = np.unravel_index(np.argmax(res_scaled), res.shape)
    if res_scaled[to_rej] > threshold:
        return to_rej
    else:
        return None
Project: generals_a3c    Author: yilundu    | Project source | File source
def gen_valid_move(move_index, label_map, army_map, dims):
    """Generate the top valid move given an output from network"""
    x1, y1, x2, y2 = 0, 0, 0, 0
    move_half = False

    for i in range(moves.shape[0]):
        move = moves[i]
        if action_mask[move] == 0:
            break

        move_type, y1, x1 = np.unravel_index(move, (8, dims[0], dims[1]))
        index = move_type % 4

        if index == 0:
            x2, y2 = x1, y1 + 1
        elif index == 1:
            x2, y2 = x1 + 1, y1
        elif index == 2:
            x2, y2 = x1, y1 - 1
        elif index == 3:
            x2, y2 = x1 - 1, y1

        move_half = True if move_type >= 4 else False

        if y2 < 0 or y2 >= dims[0] or x2 < 0 or x2 >= dims[1]:
            continue

        if label_map[y2, x2] != generals.MOUNTAIN and army_map[y1, x1] > 1:
            break

    return x1, y1, x2, y2, move_half
Project: pypiv    Author: jr7    | Project source | File source
def find_peak(corr, method='gaussian'):
    """Peak detection algorithm switch

    After loading the correlation window, a maximum finder is invoked.
    The correlation window is cut down to the necessary 9 points around the maximum.
    The maximum is then checked not to be too close to the border of the correlation frame.
    This cropped window is used along with the chosen method to interpolate the sub-pixel shift.
    Each interpolation method returns a tuple with the sub-pixel shift in x and y direction.
    The maximum's position and the sub-pixel shift are added and returned.
    If an error occurs during the sub-pixel interpolation, the shift is set to nan.
    If the interpolation method is unknown, an exception is thrown.

    :param corr: correlation window
    :param method: peak finder algorithm (gaussian, centroid, parabolic, 9point)
    :raises: Sub pixel interpolation method not found
    :returns: shift in interrogation window
    """
    i, j = np.unravel_index(corr.argmax(), corr.shape)
    if check_peak_position(corr, i, j) is False:
        return np.nan, np.nan
    window = corr[i-1:i+2, j-1:j+2]

    if method == 'gaussian':
        subpixel_interpolation = gaussian
    elif method == 'centroid':
        subpixel_interpolation = centroid
    elif method == 'parabolic':
        subpixel_interpolation = parabolic
    elif method == '9point':
        subpixel_interpolation = gaussian2D
    else:
        raise Exception('Sub pixel interpolation method not found!')
    try:
        dx, dy = subpixel_interpolation(window)
    except Exception:
        return np.nan, np.nan
    else:
        return (i + dx, j + dy)
Project: mpnum    Author: dseuss    | Project source | File source
def _sample_direct(self, rng, state, mode, n_samples, out, eps):
        """Sample from full pmfilities (call :func:`self.sample`)"""
        pmf = self.pmf_as_array(state, mode, eps)
        choices = rng.choice(pmf.size, n_samples, p=pmf.flat)
        for pos, c in enumerate(np.unravel_index(choices, pmf.shape)):
            out[:, pos] = c
Project: prysm    Author: brandondube    | Project source | File source
def test_array_orientation_consistency_tilt():
    ''' The pupil array should be shaped as arr[x,y], as should the psf and MTF.
        A linear phase error in the pupil along y should cause a motion of the
        PSF in y.  Specifically, for a positive-signed phase, that should cause
        a shift in the +y direction.
    '''
    samples = 128
    p = Seidel(W111=1, samples=samples)
    ps = PSF.from_pupil(p, 1)
    idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape)  # row-major y, x
    assert idx_x == ps.center_x
    assert idx_y > ps.center_y
Project: CNN-from-Scratch    Author: zishansami102    | Project source | File source
def nanargmax(a):
    idx = np.argmax(a, axis=None)
    multi_idx = np.unravel_index(idx, a.shape)
    if np.isnan(a[multi_idx]):
        nan_count = np.sum(np.isnan(a))
        # In numpy < 1.8 use idx = np.argsort(a, axis=None)[-nan_count-1]
        idx = np.argpartition(a, -nan_count-1, axis=None)[-nan_count-1]
        multi_idx = np.unravel_index(idx, a.shape)
    return multi_idx
Project: CNN-from-Scratch    Author: zishansami102    | Project source | File source
def nanargmax(a):
    idx = np.argmax(a, axis=None)
    multi_idx = np.unravel_index(idx, a.shape)
    if np.isnan(a[multi_idx]):
        nan_count = np.sum(np.isnan(a))
        # In numpy < 1.8 use idx = np.argsort(a, axis=None)[-nan_count-1]
        idx = np.argpartition(a, -nan_count-1, axis=None)[-nan_count-1]
        multi_idx = np.unravel_index(idx, a.shape)
    return multi_idx
Project: Learning-to-navigate-without-a-map    Author: ToniRV    | Project source | File source
def create_world_from_grid(self, grid, im_size, start, goal):
        # Create gazebo world out of the grid
        scale = 0.75

        # Add start box
        self.add_target_box_green(start[0], start[1])

        # Add goal box
        self.add_target_box_red(goal[0], goal[1])

        # Build walls around the field
        wall_width = 0.5
        self.add_wall(scale*(im_size[0]-1)/2.0, 0, 0, scale*(im_size[0]-1), wall_width)
        self.add_wall(0, scale*(im_size[1]-1)/2.0, pi / 2.0, scale*(im_size[0]-1), wall_width)
        self.add_wall(scale*(im_size[0]-1), scale*(im_size[1]-1)/2.0, - pi / 2.0, scale*(im_size[0]-1), wall_width)
        self.add_wall(scale*(im_size[0]-1)/2.0, scale*(im_size[1]-1), pi, scale*(im_size[0]-1), wall_width)

        # Add asphalt
        self.add_tarmac(scale*(im_size[0]-1)/2.0, scale*(im_size[1]-1)/2.0, 0, scale*(im_size[0]-1), scale*(im_size[1]-1))

        # Add cones wherever there should be obstacles
        i = 1
        j = 1
        obstacle_indices = np.where(grid != 1)
        unraveled_indices = np.unravel_index(obstacle_indices, im_size, order='C')
        for x in grid:
            if (grid[j+i*im_size[0]] != 1):
                self.add_cone(scale*j, scale*i)
                self.write()
            j += 1
            if (j % (im_size[1]-1)) == 0:
                j = 1
                i +=1
                if (i == im_size[0]-1):
                    break
Project: discretize    Author: simpeg    | Project source | File source
def ind2sub(shape, inds):
    """From the given shape, returns the subscripts of the given index"""
    if type(inds) is not np.ndarray:
        inds = np.array(inds)
    assert len(inds.shape) == 1, (
        'Indexing must be done as a 1D row vector, e.g. [3,6,6,...]'
    )
    return np.unravel_index(inds, shape, order='F')
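Since ind2sub passes order='F', here is a short sketch of how the column-major option changes the subscripts relative to NumPy's default row-major order (the index values are arbitrary):

import numpy as np

shape = (3, 4)                   # 3 rows, 4 columns
inds = np.array([0, 1, 5])

# Row-major (C order, the NumPy default): flat index 1 -> (0, 1).
np.unravel_index(inds, shape)              # (array([0, 0, 1]), array([0, 1, 1]))

# Column-major (Fortran/MATLAB order): flat index 1 -> (1, 0).
np.unravel_index(inds, shape, order='F')   # (array([0, 1, 2]), array([0, 0, 1]))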
Project: radar    Author: amoose136    | Project source | File source
def test_basic(self):
        assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
        assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
        assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
        assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
        assert_raises(ValueError, np.unravel_index, -1, (2, 2))
        assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
        assert_raises(ValueError, np.unravel_index, 4, (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
        assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))

        assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4])
        assert_equal(
            np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4)

        arr = np.array([[3, 6, 6], [4, 5, 1]])
        assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
        assert_equal(
            np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
        assert_equal(
            np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
        assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
                     [12, 13, 13])
        assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)

        assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
                     [[3, 6, 6], [4, 5, 1]])
        assert_equal(
            np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
            [[3, 6, 6], [4, 5, 1]])
        assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
Project: radar    Author: amoose136    | Project source | File source
def test_dtypes(self):
        # Test with different data types
        for dtype in [np.int16, np.uint16, np.int32,
                      np.uint32, np.int64, np.uint64]:
            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
            shape = (5, 8)
            uncoords = 8*coords[0]+coords[1]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0]+5*coords[1]
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))

            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
                dtype=dtype)
            shape = (5, 8, 10)
            uncoords = 10*(8*coords[0]+coords[1])+coords[2]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0]+5*(coords[1]+8*coords[2])
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
Project: thunder-registration    Author: thunder-project    | Project source | File source
def compute(a, b):
        """
        Compute an optimal displacement between two ndarrays.

        Finds the displacement between two ndimensional arrays. Arrays must be
        of the same size. Algorithm uses a cross correlation, computed efficiently
        through an n-dimensional fft.

        Parameters
        ----------
        a : ndarray
            The first array

        b : ndarray
            The second array
        """
        from numpy.fft import rfftn, irfftn
        from numpy import unravel_index, argmax

        # compute real-valued cross-correlation in fourier domain
        s = a.shape
        f = rfftn(a)
        f *= rfftn(b).conjugate()
        c = abs(irfftn(f, s))

        # find location of maximum
        inds = unravel_index(argmax(c), s)

        # fix displacements that are greater than half the total size
        pairs = zip(inds, a.shape)
        # cast to basic python int for serialization
        adjusted = [int(d - n) if d > n // 2 else int(d) for (d, n) in pairs]

        return Displacement(adjusted)
Project: KCF    Author: Bruceeeee    | Project source | File source
def update_tracker(response,img_size,pos,HOG_flag,scale_factor=1):
    start_w,start_h = response.shape
    w,h = img_size
    px,py,ww,wh = pos
    res_pos = np.unravel_index(response.argmax(),response.shape)
    scale_w = 1.0*scale_factor*(ww*2)/start_w
    scale_h = 1.0*scale_factor*(wh*2)/start_h
    move = list(res_pos)
    if not HOG_flag:
        px_new = [px+1.0*move[0]*scale_w,px-(start_w-1.0*move[0])*scale_w][move[0]>start_w/2] 
        py_new = [py+1.0*move[1]*scale_h,py-(start_h-1.0*move[1])*scale_h][move[1]>start_h/2]
        px_new = np.int(px_new) 
        py_new = np.int(py_new)
    else:
        move[0] = np.floor(res_pos[0]/32.0*(2*ww))
        move[1] = np.floor(res_pos[1]/32.0*(2*wh))
        px_new = [px+move[0],px-(2*ww-move[0])][move[0]>ww] 
        py_new = [py+move[1],py-(2*wh-move[1])][move[1]>wh] 
    if px_new<0: px_new = 0
    if px_new>w: px_new = w-1
    if py_new<0: py_new = 0
    if py_new>h: py_new = h-1
    ww_new = np.ceil(ww*scale_factor)
    wh_new = np.ceil(wh*scale_factor)
    new_pos = (px_new,py_new,ww_new,wh_new)
    return new_pos
Project: HTM_experiments    Author: ctrl-z-9000-times    | Project source | File source
def act(self, observation, reward):
        """
        Interact with and learn from the environment.
        Returns the suggested control vector.
        """
        observation = np.ravel_multi_index(observation, self.input_shape)
        self.xp_q.update_reward(reward)
        action = self.best_action(observation)
        self.xp_q.add(observation, action)
        action = np.unravel_index(action, self.output_shape)
        return action
Project: HTM_experiments    Author: ctrl-z-9000-times    | Project source | File source
def stabilize(self, prior_columns, percent):
        """
        This activates prior columns to force active in order to maintain
        the given percent of column overlap between time steps.  Always call
        this between compute and learn!
        """
        # num_active      = (len(self.columns) + len(prior_columns)) / 2
        num_active      = len(self.columns)
        overlap         = self.columns.overlap(prior_columns)
        stabile_columns = int(round(num_active * overlap))
        target_columns  = int(round(num_active * percent))
        add_columns     = target_columns - stabile_columns
        if add_columns <= 0:
            return

        eligable_columns  = np.setdiff1d(prior_columns.flat_index, self.columns.flat_index)
        eligable_excite   = self.raw_excitment[eligable_columns]
        selected_col_nums = np.argpartition(-eligable_excite, add_columns-1)[:add_columns]
        selected_columns  = eligable_columns[selected_col_nums]
        selected_index    = np.unravel_index(selected_columns, self.columns.dimensions)
        # Learn.  Note: selected columns will learn twice.  The previously
        # active segments learn now, the current most excited segments in the
        # method SP.learn().
        # Or learn not at all if theres a bug in my code...
        # if self.multisegment:
        #     if hasattr(self, 'prior_segment_excitement'):
        #         segment_excitement = self.prior_segment_excitement[selected_index]
        #         seg_idx = np.argmax(segment_excitement, axis=-1)
        #         self.proximal.learn_outputs(input_sdr=input_sdr,
        #                                     output_sdr=selected_index + (seg_idx,))
        #     self.prev_segment_excitement = self.segment_excitement
        # else:
        #     1/0
        self.columns.flat_index = np.concatenate([self.columns.flat_index, selected_columns])
Project: rl_algorithms    Author: DanielTakeshi    | Project source | File source
def _render(self, mode='human', close=False):
        if close:
            return

        outfile = StringIO() if mode == 'ansi' else sys.stdout

        for s in range(self.nS):
            position = np.unravel_index(s, self.shape)
            # print(self.s)
            if self.s == s:
                output = " x "
            elif position == (3,11):
                output = " T "
            elif self._cliff[position]:
                output = " C "
            else:
                output = " o "

            if position[1] == 0:
                output = output.lstrip() 
            if position[1] == self.shape[1] - 1:
                output = output.rstrip() 
                output += "\n"

            outfile.write(output)
        outfile.write("\n")
Project: rl_algorithms    Author: DanielTakeshi    | Project source | File source
def __init__(self):
        self.shape = (7, 10)

        nS = np.prod(self.shape)
        nA = 4

        # Wind strength
        winds = np.zeros(self.shape)
        winds[:,[3,4,5,8]] = 1
        winds[:,[6,7]] = 2

        # Calculate transition probabilities
        P = {}
        for s in range(nS):
            position = np.unravel_index(s, self.shape)
            P[s] = { a : [] for a in range(nA) }
            P[s][UP] = self._calculate_transition_prob(position, [-1, 0], winds)
            P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1], winds)
            P[s][DOWN] = self._calculate_transition_prob(position, [1, 0], winds)
            P[s][LEFT] = self._calculate_transition_prob(position, [0, -1], winds)

        # We always start in state (3, 0)
        isd = np.zeros(nS)
        isd[np.ravel_multi_index((3,0), self.shape)] = 1.0

        super(WindyGridworldEnv, self).__init__(nS, nA, P, isd)
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | Project source | File source
def argmax(a):
    """ Return unravelled index of the maximum

    :param a: array to be searched
    """
    return numpy.unravel_index(a.argmax(), a.shape)
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | Project source | File source
def find_max_abs_stack(stack, windowstack, couplingmatrix):
    """Find the location and value of the absolute maximum in this stack
    :param stack: stack to be searched
    :param windowstack: Window for the search
    :param couplingmatrix: Coupling matrix between different scales
    :return: x, y, scale

    """
    pabsmax = 0.0
    pscale = 0
    px = 0
    py = 0
    nscales = stack.shape[0]
    assert nscales > 0
    pshape = [stack.shape[1], stack.shape[2]]
    for iscale in range(nscales):
        if windowstack is not None:
            resid = stack[iscale, :, :] * windowstack[iscale, :, :] / couplingmatrix[iscale, iscale]
        else:
            resid = stack[iscale, :, :] / couplingmatrix[iscale, iscale]

        # Find the peak in the scaled residual image
        mx, my = numpy.unravel_index(numpy.abs(resid).argmax(), pshape)

        # Is this the peak over all scales?
        thisabsmax = numpy.abs(resid[mx, my])
        if thisabsmax > pabsmax:
            px = mx
            py = my
            pscale = iscale
            pabsmax = thisabsmax

    return px, py, pscale
Project: modesolverpy    Author: jtambasco    | Project source | File source
def fit_gaussian(x, y, z_2d, save_fits=False):
    z = z_2d

    max_idx = np.unravel_index(z.argmax(), z.shape)
    max_row = max_idx[0] - 1
    max_col = max_idx[1] - 1

    z_max_row = z[max_row, :]
    z_max_col = z[:, max_col]
    A = z[max_row, max_col]

    p_guess_x = (A, x[max_col], 0.1*(x[-1] - x[0]))
    p_guess_y = (A, y[max_row], 0.1*(y[-1] - y[0]))

    coeffs_x, var_matrix_x = sciopt.curve_fit(gaussian, x, z_max_row, p_guess_x)
    coeffs_y, var_matrix_y = sciopt.curve_fit(gaussian, y, z_max_col, p_guess_y)

    c_x = (x[-1]-x[0])*(max_col+1)/x.size + x[0]
    c_y = (y[-1]-y[0])*(y.size-(max_row+1))/y.size + y[0]
    centre = (c_x, c_y)

    sigma = np.array([coeffs_x[2], coeffs_y[2]])
    fwhm = 2.355 * sigma
    sigma_2 = 1.699 * fwhm

    if save_fits:
        with open('x_fit.dat', 'w') as fs:
            for c in np.c_[x, z_max_row, gaussian(x, *coeffs_x)]:
                s = ','.join([str(v) for v in c])
                fs.write(s+'\n')
        with open('y_fit.dat', 'w') as fs:
            for c in np.c_[y, z_max_col, gaussian(y, *coeffs_y)]:
                s = ','.join([str(v) for v in c])
                fs.write(s+'\n')

    return A, centre, sigma_2
Project: bi-att-flow    Author: allenai    | Project source | File source
def argmax(x):
    return np.unravel_index(x.argmax(), x.shape)
Project: polara    Author: Evfro    | Project source | File source
def get_test_tensor(self, test_data, shape, start, end):
        slice_idx = self._slice_test_data(test_data, start, end)

        num_users = end - start
        num_items = shape[1]
        num_fdbks = shape[2]
        slice_shp = (num_users, num_items, num_fdbks)

        idx_flat = np.ravel_multi_index(slice_idx, slice_shp)
        shp_flat = (num_users*num_items, num_fdbks)
        idx = np.unravel_index(idx_flat, shp_flat)
        val = np.ones_like(slice_idx[2])

        test_tensor_unfolded = csr_matrix((val, idx), shape=shp_flat, dtype=val.dtype)
        return test_tensor_unfolded, slice_idx