Python numpy module: flip() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.flip().
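As a quick primer before the project code, here is a minimal sketch of what numpy.flip() does: it reverses the order of elements along the given axis and returns a view of the input (in NumPy versions before 1.15 the axis argument is required, which is why every example below passes it explicitly).

import numpy as np

a = np.arange(6).reshape(2, 3)
# array([[0, 1, 2],
#        [3, 4, 5]])

np.flip(a, 0)    # reverse the rows:    [[3, 4, 5], [0, 1, 2]]
np.flip(a, 1)    # reverse the columns: [[2, 1, 0], [5, 4, 3]]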

Project: pauvre    Author: conchoecia
def plotArc(start_angle, stop_angle, radius, width, **kwargs):
    """ write a docstring for this function"""
    numsegments = 100
    theta = np.radians(np.linspace(start_angle+90, stop_angle+90, numsegments))
    centerx = 0
    centery = 0
    x1 = -np.cos(theta) * (radius)
    y1 = np.sin(theta) * (radius)
    stack1 = np.column_stack([x1, y1])
    x2 = -np.cos(theta) * (radius + width)
    y2 = np.sin(theta) *  (radius + width)
    stack2 = np.column_stack([np.flip(x2, axis=0), np.flip(y2,axis=0)])
    #add the first values from the first set to close the polygon
    stack2 = np.append(stack2, [[x1[0], y1[0]]], axis=0)
    arcArray = np.concatenate((stack1,stack2), axis=0)
    return patches.Polygon(arcArray, True, **kwargs), ((x1, y1), (x2, y2))
Project: prysm    Author: brandondube
def save(self, path, nbits=8):
        ''' Write the image to a png, jpg, tiff, etc.

        Args:
            path (`string`): path to write the image to.

            nbits (`int`): number of bits in the output image.

        Returns:
            null: no return

        '''
        dat = (self.data * 255).astype(np.uint8)

        if self.synthetic is False:
            # was a real image, need to flip vertically.
            dat = np.flip(dat, axis=0)
        imsave(path, dat)
Project: prysm    Author: brandondube
def from_file(path, scale):
        ''' Reads a file into a new Image instance, always monochrome

        Args:
            path (`string`): path to a file.

            scale (`float`): pixel scale, in microns.

        Returns:
            `Image`: a new image object.

        Notes:
            TODO: proper handling of images with more than 8bpp.
        '''
        imgarr = imread(path, flatten=True, mode='F')

        return Image(data=np.flip(imgarr, axis=0) / 255, sample_spacing=scale, synthetic=False)
Project: prysm    Author: brandondube
def save(self, path, nbits=8):
        ''' Write the image to a png, jpg, tiff, etc.

        Args:
            path (`string`): path to write the image to.

            nbits (`int`): number of bits in the output image.

        Returns:
            null: no return

        '''
        dat = rgbimage_to_datacube(self)

        if self.synthetic is not True:
            # was a real image, need to flip vertically.
            dat = np.flip(dat, axis=0)

        imsave(path, dat)
Project: ParlAI    Author: facebookresearch
def batches_by_size(src, dst, max_tokens=None, max_sentences=None,
                    max_positions=(1024, 1024), ignore_invalid_inputs=False,
                    descending=False):
    """Returns batches of indices sorted by size. Sequences with different
    source lengths are not allowed in the same batch."""
    assert isinstance(src, IndexedDataset) and isinstance(dst, IndexedDataset)
    if max_tokens is None:
        max_tokens = float('Inf')
    if max_sentences is None:
        max_sentences = float('Inf')
    indices = np.argsort(src.sizes, kind='mergesort')
    if descending:
        indices = np.flip(indices, 0)
    return _make_batches(
        src, dst, indices, max_tokens, max_sentences, max_positions,
        ignore_invalid_inputs, allow_different_src_lens=False)
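The descending order here is produced by reversing an ascending argsort. A small illustration of that idiom (toy data, not project code):

import numpy as np

sizes = np.array([7, 2, 9, 2])
asc = np.argsort(sizes, kind='mergesort')   # stable ascending order: [1, 3, 0, 2]
desc = np.flip(asc, 0)                      # descending order:       [2, 0, 3, 1]
# sizes[desc] -> array([9, 7, 2, 2]); note that ties come out in reversed input order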
Project: seismic-python    Author: malcolmw
def read_interfaces(infile):
    infile = open(infile)
    ninter = int(infile.readline().split()[0])
    nlambda, nphi = [int(v) for v in infile.readline().split()[:2]]
    dlambda, dphi = [np.float64(v) for v in infile.readline().split()[:2]]
    lambda0, phi0 = [np.float64(v) for v in infile.readline().split()[:2]]
    grid = seispy.geogrid.GeoGrid2D(np.degrees(lambda0), np.degrees(phi0),
                             nlambda, nphi,
                             np.degrees(dlambda), np.degrees(dphi))
    interfaces = []
    for iinter in range(ninter):
        surf = seispy.surface.GeoSurface()
        surf.grid = grid
        coordinates = seispy.coords.as_left_spherical([[[np.float64(infile.readline().split()[0]),
                                                  lambda0 + ilambda*dlambda,
                                                  phi0 + iphi*dphi]
                                                for iphi in range(nphi)]
                                                for ilambda in range(nlambda)])
        coordinates = np.flip(coordinates.to_spherical(), axis=0)
        surf.coordinates = coordinates
        interfaces.append(surf)
    return(interfaces)
Project: pystudio    Author: satorchi
def Pbias(self,TES):
    '''
    find the Pbias at 90% Rn
    '''    
    filterinfo=self.filterinfo(TES)
    if filterinfo is None: return None

    Rn_ratio=self.Rn_ratio(TES)
    if not isinstance(Rn_ratio,np.ndarray):return None

    istart,iend=self.selected_iv_curve(TES)

    Rn_ratio=Rn_ratio[istart:iend]
    Ptes=self.Ptes(TES)
    Ptes=Ptes[istart:iend]

    # check that Rn_ratio is increasing
    increasing=np.diff(Rn_ratio).mean()
    if increasing<0:
        Pbias=np.interp(90., np.flip(Rn_ratio,0), np.flip(Ptes,0))
    else:
        Pbias=np.interp(90., Rn_ratio, Ptes)

    return Pbias
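The flips matter because np.interp expects its x-coordinates (xp) to be monotonically increasing; when the measured curve is decreasing, both xp and fp have to be reversed first. A small illustration with made-up numbers:

import numpy as np

rn_ratio = np.array([99., 95., 90., 80.])   # decreasing, like the branch above
ptes = np.array([1., 2., 3., 4.])

pbias = np.interp(90., np.flip(rn_ratio, 0), np.flip(ptes, 0))
# pbias == 3.0; calling np.interp on the unflipped arrays would silently
# return a wrong value because xp would not be increasing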
Project: real_time_face_detection    Author: Snowapril
def horizontal_flip(imgs, labels):
    if imgs.ndim != 2:
        print("Image dimension must be 2")
        raise Exception

    if imgs.shape[0] != labels.shape[0]:
        print("Images num and labels num must be equal")
        raise Exception

    #flip the img horizontally
    imgs = imgs.reshape(-1, 96, 96)
    imgs = np.flip(imgs, axis=2)
    imgs = imgs.reshape(-1, 96*96)

    # when the image is flipped horizontally, the y positions are unchanged but the x positions are reflected about the image center along x
    result = np.copy(labels)

    for idx in range(labels.shape[0]):
        result[idx][::2] = 96 - result[idx][::2]

    return imgs, result
Project: fairseq-py    Author: facebookresearch
def batches_by_size(src, dst, max_tokens=None, max_sentences=None,
                    max_positions=(1024, 1024), ignore_invalid_inputs=False,
                    descending=False):
    """Returns batches of indices sorted by size. Sequences with different
    source lengths are not allowed in the same batch."""
    assert isinstance(src, IndexedDataset) and isinstance(dst, IndexedDataset)
    if max_tokens is None:
        max_tokens = float('Inf')
    if max_sentences is None:
        max_sentences = float('Inf')
    indices = np.argsort(src.sizes, kind='mergesort')
    if descending:
        indices = np.flip(indices, 0)
    return _make_batches(
        src, dst, indices, max_tokens, max_sentences, max_positions,
        ignore_invalid_inputs, allow_different_src_lens=False)
Project: chainladder-python    Author: jbogaardt
def __model_form(self, tri_array):
        w = np.nan_to_num(self.weights/tri_array[:,:,:-1]**(2-self.alpha))
        x = np.nan_to_num(tri_array[:,:,:-1]*(tri_array[:,:,1:]*0+1))
        y = np.nan_to_num(tri_array[:,:,1:])
        LDF = np.sum(w*x*y,axis=1)/np.sum(w*x*x,axis=1)
        #Chainladder (alpha=1/delta=1)
        #LDF = np.sum(np.nan_to_num(tri_array[:,:,1:]),axis=1) / np.sum(np.nan_to_num((tri_array[:,:,1:]*0+1)*tri_array[:,:,:-1]),axis=1)
        #print(LDF.shape)
        # assumes no tail
        CDF = np.append(np.cumprod(LDF[:,::-1],axis=1)[:,::-1],np.array([1]*tri_array.shape[0]).reshape(tri_array.shape[0],1),axis=1)    
        latest = np.flip(tri_array,axis=1).diagonal(axis1=1,axis2=2)   
        ults = latest*CDF
        lu = list(ults)
        lc = list(CDF)
        exp_cum_triangle = np.array([np.flipud(lu[num].reshape(tri_array.shape[2],1).dot(1/lc[num].reshape(1,tri_array.shape[2]))) for num in range(tri_array.shape[0])])
        exp_incr_triangle = np.append(exp_cum_triangle[:,:,0,np.newaxis],np.diff(exp_cum_triangle),axis=2)
        return LDF, CDF, ults, exp_incr_triangle
Project: pytorch_fnet    Author: AllenCellModeling
def pad_mirror(ar, padding):
    """Pad 3d array using mirroring.

    Parameters:
    ar - (numpy.array) array to be padded
    padding - (tuple) per-dimension padding values
    """
    shape = tuple((ar.shape[i] + 2*padding[i]) for i in range(3))
    result = np.zeros(shape, dtype=ar.dtype)
    slices_center = tuple(slice(padding[i], padding[i] + ar.shape[i]) for i in range(3))
    result[slices_center] = ar
    # z-axis, centers
    if padding[0] > 0:
        result[0:padding[0], slices_center[1] , slices_center[2]] = np.flip(ar[0:padding[0], :, :], axis=0)
        result[ar.shape[0] + padding[0]:, slices_center[1] , slices_center[2]] = np.flip(ar[-padding[0]:, :, :], axis=0)
    # y-axis
    result[:, 0:padding[1], :] = np.flip(result[:, padding[1]:2*padding[1], :], axis=1)
    result[:, padding[1] + ar.shape[1]:, :] = np.flip(result[:, ar.shape[1]:ar.shape[1] + padding[1], :], axis=1)
    # x-axis
    result[:, :, 0:padding[2]] = np.flip(result[:, :, padding[2]:2*padding[2]], axis=2)
    result[:, :, padding[2] + ar.shape[2]:] = np.flip(result[:, :, ar.shape[2]:ar.shape[2] + padding[2]], axis=2)
    return result
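For padding widths no larger than the array extent, this mirroring (the edge sample is repeated) should match NumPy's built-in symmetric padding, which can serve as a quick cross-check:

import numpy as np

ar = np.arange(4.)                   # [0., 1., 2., 3.]
np.pad(ar, 2, mode='symmetric')      # [1., 0., 0., 1., 2., 3., 3., 2.]

# for the 3-D case the expectation (an assumption, not project code) is
# np.array_equal(pad_mirror(ar3d, padding),
#                np.pad(ar3d, [(p, p) for p in padding], mode='symmetric'))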
Project: pytorch_fnet    Author: AllenCellModeling
def _augment_chunks(self, chunks):
        if self.choices_augmentation is None:
            return chunks
        chunks_new = []
        choice = np.random.choice(self.choices_augmentation)
        for chunk in chunks:
            chunk_new = chunk
            if choice in [1, 3, 5, 7]:
                chunk_new = np.flip(chunk_new, axis=1)
            if   choice in [2, 3]:
                chunk_new = np.rot90(chunk_new, 1, axes=(1, 2))
            elif choice in [4, 5]:
                chunk_new = np.rot90(chunk_new, 2, axes=(1, 2))
            elif choice in [6, 7]:
                chunk_new = np.rot90(chunk_new, 3, axes=(1, 2))
            chunks_new.append(chunk_new)
        return chunks_new
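The choice values 0-7 sample the eight flip/rotation symmetries of the (axis 1, axis 2) plane: four rotations, each with or without a flip. A hedged sketch that enumerates the same set of variants (the composition order differs from the code above, but the resulting set of eight images is the same):

import numpy as np

def eight_symmetries(chunk):
    """Yield the 8 flip/rot90 variants of `chunk` in its (1, 2) plane."""
    for k in range(4):                      # 0, 90, 180, 270 degree rotations
        rotated = np.rot90(chunk, k, axes=(1, 2))
        yield rotated
        yield np.flip(rotated, axis=1)      # the rotation combined with a flip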
Project: paradox    Author: ictxiangxin
def __compute_valid_convolution_nd(data, kernel, dimension: int):
    convolution_shape = tuple(data.shape[i] - kernel.shape[i] + 1 for i in range(-1, -dimension - 1, -1))
    list_dimension = reduce(lambda a, b: a * b, convolution_shape)
    data_prefix = data.shape[:-dimension]
    kernel_flat = kernel.ravel()
    data_flat = numpy.zeros(data_prefix + (list_dimension, len(kernel_flat)))
    for i in range(list_dimension):
        tensor_slice_start = [0] * len(kernel.shape)
        tensor_slice = [slice(None)] * len(data.shape)
        tensor_slice_start[-1] = i
        for r in range(-1, -len(kernel.shape) - 1, -1):
            dimension_scale = data.shape[r] - kernel.shape[r] + 1
            if tensor_slice_start[r] >= dimension_scale:
                tensor_slice_start[r + 1] = tensor_slice_start[r] // dimension_scale
                tensor_slice_start[r] %= dimension_scale
            tensor_slice[r] = slice(tensor_slice_start[r], tensor_slice_start[r] + kernel.shape[r])
        sub_convolution_index = (slice(None),) * (len(data.shape) - dimension) + tuple([i, slice(None)])
        data_flat[sub_convolution_index] = data[tensor_slice].reshape(data_prefix + (reduce(lambda a, b: a * b, kernel.shape),))
    convolution_flat = numpy.matmul(data_flat, numpy.flip(kernel_flat, axis=0))
    convolution_nd = convolution_flat.reshape(data_prefix + convolution_shape)
    return convolution_nd
Project: kaggle_dsb    Author: syagev
def slice_cube(cube):

    slices = np.zeros((cube.shape[0], cube.shape[0], 9), dtype=np.float32)

    # axis-aligned
    slices[:,:,0] = cube[np.floor(cube.shape[0] / 2).astype(int), :, :]
    slices[:,:,1] = cube[:, np.floor(cube.shape[0] / 2).astype(int), :]
    slices[:,:,2] = cube[:, :, np.floor(cube.shape[0] / 2).astype(int)]

    # diagonals
    slices[:,:,3] = cube.diagonal(axis1=0, axis2=1)
    slices[:,:,4] = cube.diagonal(axis1=0, axis2=2)
    slices[:,:,5] = cube.diagonal(axis1=1, axis2=2)
    slices[:,:,6] = np.flip(cube, 0).diagonal(axis1=0, axis2=1)
    slices[:,:,7] = np.flip(cube, 0).diagonal(axis1=0, axis2=2)
    slices[:,:,8] = np.flip(cube, 1).diagonal(axis1=1, axis2=2)

    return slices
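The flip-before-diagonal trick is what pulls out the anti-diagonal slices: reversing one axis makes the main diagonal of the flipped array an anti-diagonal of the original. A 2-D illustration:

import numpy as np

a = np.arange(9).reshape(3, 3)
a.diagonal()                # main diagonal: [0, 4, 8]
np.flip(a, 0).diagonal()    # anti-diagonal, bottom-left to top-right: [6, 4, 2]
np.flip(a, 1).diagonal()    # anti-diagonal, top-right to bottom-left: [2, 4, 6]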
Project: c3nav    Author: c3nav
def to_image(self):
        from c3nav.mapdata.models import Source
        (minx, miny), (maxx, maxy) = Source.max_bounds()

        height, width = self.data.shape
        image_data = np.zeros((int(math.ceil((maxy-miny)/self.resolution)),
                               int(math.ceil((maxx-minx)/self.resolution))), dtype=np.uint8)

        if self.data.size:
            minval = min(self.data.min(), 0)
            maxval = max(self.data.max(), minval+0.01)
            visible_data = ((self.data.astype(float)-minval)*255/(maxval-minval)).clip(0, 255).astype(np.uint8)
            image_data[self.y:self.y+height, self.x:self.x+width] = visible_data

        from PIL import Image
        return Image.fromarray(np.flip(image_data, axis=0), 'L')
Project: TikZ    Author: ellisk42
def draw(self,context = None, adjustCanvasSize = False):
        if adjustCanvasSize:
            x0,y0,x1,y1 = self.extent()
            self = self.translate(-x0 + 1,-y0 + 1)
            x0,y0,x1,y1 = self.extent()
            W = max([256, 16*(y1 + 1), 16*(x1 + 1)])
            H = W
        else:
            W = 256
            H = 256

        if context is None:
            data = np.zeros((W,H), dtype=np.uint8)
            surface = cairo.ImageSurface.create_for_data(data,cairo.FORMAT_A8,W,H)
            context = cairo.Context(surface)
        for l in self.lines: l.draw(context)
        data = np.flip(data, 0)/255.0
        if adjustCanvasSize:
            import scipy.ndimage
            return scipy.ndimage.zoom(data,W/256.0)
        return data
Project: gy_mlcamp17    Author: gylee1103
def _random_preprocessing(self, image, size):
      # rotate image
      rand_degree = np.random.randint(0, 90)
      rand_flip = np.random.randint(0, 2)
      if rand_flip == 1:
        image = np.flip(image, 1)
      image = scipy.ndimage.interpolation.rotate(image, rand_degree, cval=255)

      # Select cropping range between (target_size/2 ~ original_size)
      original_h, original_w = image.shape
      #crop_width = np.random.randint(self.target_size/3, min(self.target_size, original_w))
      #crop_height = np.random.randint(self.target_size/3, min(self.target_size, original_h))
      crop_width = self.target_size
      crop_height = self.target_size
      topleft_x = np.random.randint(0, original_w - crop_width)
      topleft_y = np.random.randint(0, original_h - crop_height)
      cropped_img = image[topleft_y:topleft_y+crop_height,
          topleft_x:topleft_x+crop_width]
      #output = scipy.misc.imresize(cropped_img, [self.target_size, self.target_size])
      output = cropped_img

      output = (output - 128.0) / 128.0
      return output
Project: gy_mlcamp17    Author: gylee1103
def _random_preprocessing(self, image, size):
      # rotate image
      rand_degree = np.random.randint(0, 180)
      rand_flip = np.random.randint(0, 2)
      image = scipy.ndimage.interpolation.rotate(image, rand_degree, cval=255)
      if rand_flip == 1:
        image = np.flip(image, 1)

      # Select cropping range between (target_size/2 ~ original_size)
      original_h, original_w = image.shape
      crop_width = np.random.randint(self.target_size/2, min(self.target_size*2, original_w))
      crop_height = np.random.randint(self.target_size/2, min(self.target_size*2, original_h))
      topleft_x = np.random.randint(0, original_w - crop_width)
      topleft_y = np.random.randint(0, original_h - crop_height)
      cropped_img = image[topleft_y:topleft_y+crop_height,
          topleft_x:topleft_x+crop_width]
      output = scipy.misc.imresize(cropped_img, [self.target_size, self.target_size])
      # threshold
      output_thres = np.where(output < 150, -1.0, 1.0)

      return output_thres
Project: vrep-env    Author: ycps
def obj_get_vision_image(self, handle):
        resolution, image = self.RAPI_rc(vrep.simxGetVisionSensorImage( self.cID,handle,
            0, # assume RGB
            vrep.simx_opmode_blocking,
        ))
        dim, im = resolution, image
        nim = np.array(im, dtype='uint8')
        nim = np.reshape(nim, (dim[1], dim[0], 3))
        nim = np.flip(nim, 0)  # flip vertically (reverse the image rows)
        #nim = np.flip(nim, 2)  # RGB -> BGR
        return nim

    # "setters"
Project: prysm    Author: brandondube
def from_file(path, scale):
        ''' Reads a file into a new RGBImage instance, must be 24bpp/8bpc

        Args:
            path (`string`): path to a file.

            scale (`float`): pixel scale, in microns.

        Returns:
            `RGBImage`: a new image object.

        Notes:
            TODO: proper handling of images with more than 8bpp.
        '''
        # img is an mxnx3 array of unit8s
        img = imread(path).astype(config.precision) / 255

        img = np.flip(img, axis=0)

        # crop the image if it has an odd dimension.
        # TODO: change this an understand why it is an issue
        # fftshift vs ifftshift?
        if is_odd(img.shape[0]):
            img = img[0:-1, :, :]
        if is_odd(img.shape[1]):
            img = img[:, 0:-1, :]
        return RGBImage(r=img[:, :, 0], g=img[:, :, 1], b=img[:, :, 2],
                        sample_spacing=scale, synthetic=False)
Project: prysm    Author: brandondube
def fold_array(array, axis=1):
    ''' Folds an array in half over the given axis and averages.

    Args:
        array (`numpy.ndarray`): 2d array to fold.

        axis (`int`): axis to fold over.

    Returns
        numpy.ndarray:  new array.

    '''

    xs, ys = array.shape
    if axis == 1:
        xh = xs // 2
        left_chunk = array[:, :xh]
        right_chunk = array[:, xh:]
        folded_array = np.concatenate((right_chunk[:, :, np.newaxis],
                                       np.flip(np.flip(left_chunk, axis=1),
                                               axis=0)[:, :, np.newaxis]),
                                      axis=2)
    else:
        yh = ys // 2
        top_chunk = array[:yh, :]
        bottom_chunk = array[yh:, :]
        folded_array = np.concatenate((bottom_chunk[:, :, np.newaxis],
                                       np.flip(np.flip(top_chunk, axis=1),
                                               axis=0)[:, :, np.newaxis]),
                                      axis=2)
    return folded_array.mean(axis=2)
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko
def predict(self, img, flip_evaluation):
        """
        Predict segementation for an image.

        Arguments:
            img: must be rowsxcolsx3
        """
        h_ori, w_ori = img.shape[:2]
        if img.shape[0:2] != self.input_shape:
            print("Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (img.shape[0:2], self.input_shape))
            img = misc.imresize(img, self.input_shape)
        input_data = self.preprocess_image(img)
        # utils.debug(self.model, input_data)

        regular_prediction = self.model.predict(input_data)[0]
        if flip_evaluation:
            print("Predict flipped")
            flipped_prediction = np.fliplr(self.model.predict(np.flip(input_data, axis=2))[0])
            prediction = (regular_prediction + flipped_prediction) / 2.0
        else:
            prediction = regular_prediction

        if img.shape[0:2] != self.input_shape:  # upscale prediction if necessary
            h, w = prediction.shape[:2]
            prediction = ndimage.zoom(prediction, (1.*h_ori/h, 1.*w_ori/w, 1.),
                                      order=1, prefilter=False)
        return prediction
Project: shoelace    Author: rjagerman
def test_ndcg_minimal():

    # Set up data
    prediction = np.arange(10).astype(dtype=np.float32)
    ground_truth = np.flip(prediction, axis=0)

    # Compute and assert nDCG value
    assert_equal(ndcg(prediction, ground_truth).data, 0.39253964576233569)
Project: brainiak    Author: brainiak
def _bin_exp(self, n_bin, scale=1.0):
        """ Calculate the bin locations to approximate exponential distribution.
            It breaks the cumulative probability of exponential distribution
            into n_bin equal bins, each covering 1 / n_bin probability. Then it
            calculates the center of mass in each bins and returns the
            centers of mass. So, it approximates the exponential distribution
            with n_bin of Delta function weighted by 1 / n_bin, at the
            locations of these centers of mass.
        Parameters:
        -----------
        n_bin: int
            The number of bins to approximate the exponential distribution
        scale: float, default: 1.0
            The scale parameter of the exponential distribution, defined in
            the same way as scipy.stats. It does not influence the ratios
            between the bins, but just controls the spacing between the bins.
            So generally users should not change its default.
        Returns:
        --------
        bins: numpy array of size [n_bin,]
            The centers of mass for each segment of the
            exponential distribution.
        """
        boundaries = np.flip(scipy.stats.expon.isf(
            np.linspace(0, 1, n_bin + 1),
            scale=scale), axis=0)
        bins = np.empty(n_bin)
        for i in np.arange(n_bin):
            bins[i] = utils.center_mass_exp(
                (boundaries[i], boundaries[i + 1]), scale=scale)
        return bins
Project: seismic-python    Author: malcolmw
def _read_fmm3d(self, inf):
        inf = open(inf, "r")
        self.nvgrids, self.nvtypes = [int(v) for v in inf.readline().split()[:2]]
        self.v_type_grids = {}
        for (typeID, gridID) in [(ivt, ivg) for ivt in range(1, self.nvtypes+1)
                                            for ivg in range(1, self.nvgrids+1)]:
            if typeID not in self.v_type_grids:
                self.v_type_grids[typeID] = {}
            model = {"typeID": typeID, "gridID": gridID}
            nrho, nlambda, nphi = [int(v) for v in inf.readline().split()[:3]]
            drho, dlambda, dphi = [float(v) for v in inf.readline().split()[:3]]
            rho0, lambda0, phi0 = [float(v) for v in inf.readline().split()[:3]]
            model["grid"] = seispy.geogrid.GeoGrid3D(np.degrees(lambda0),
                                              np.degrees(phi0),
                                              seispy.constants.EARTH_RADIUS - (rho0 + (nrho-1)*drho),
                                              nlambda, nphi, nrho,
                                              np.degrees(dlambda),
                                              np.degrees(dphi),
                                              drho)
            #model["coords"] = coords.SphericalCoordinates((nrho, nlambda, nphi))
            #model["coords"][...] = [[[[rho0 + irho * drho, ?/2 - (lambda0 + ilambda * dlambda), phi0 + iphi * dphi]
            #                           for iphi in range(nphi)]
            #                           for ilambda in range(nlambda)]
            #                           for irho in range(nrho)]
            #model["coords"] = np.flip(model["coords"], axis=1)
            model["data"] = np.empty((nrho, nlambda, nphi))
            model["data"][...] = [[[float(inf.readline().split()[0])
                                    for iphi in range(nphi)]
                                    for ilambda in range(nlambda)]
                                    for irho in range(nrho)]
            model["data"] = np.flip(model["data"], axis=1)
            self.v_type_grids[typeID][gridID] = model
Project: VLTPF    Author: avigan
def _shift_interp_builtin(array, shift_value, mode='constant', cval=0):
    shifted = ndimage.shift(array, np.flip(shift_value, 0), order=3, mode=mode, cval=cval)

    return shifted
Project: DenseHumanBodyCorrespondences    Author: halimacc
def draw(self, vertices, colors, mvp):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glUseProgram(self.program)
        glUniformMatrix4fv(self.mvpMatrix, 1, GL_FALSE, mvp)

        glEnableVertexAttribArray(0)
        glBindBuffer(GL_ARRAY_BUFFER, self.vertexBuf)
        glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW);
        glVertexAttribPointer(
            0,                  # attribute
            vertices.shape[1],                  # size
            GL_FLOAT,           # type
            GL_FALSE,           # normalized?
            0,                  # stride
            None            # array buffer offset
        );

        glEnableVertexAttribArray(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.colorBuf)
        glBufferData(GL_ARRAY_BUFFER, colors, GL_STATIC_DRAW);
        glVertexAttribPointer(
            1,                                # attribute
            colors.shape[1],                                # size
            GL_FLOAT,                         # type
            GL_FALSE,                         # normalized?
            0,                                # stride
            None                          # array buffer offset
        );

        glDrawArrays(GL_TRIANGLES, 0, vertices.shape[0])
        glDisableVertexAttribArray(0)
        glDisableVertexAttribArray(1)
        glUseProgram(0)
        glutSwapBuffers()

        rgb = glReadPixels(0, 0, self.width, self.height, GL_RGB, GL_UNSIGNED_BYTE, outputType=None)
        z = glReadPixels(0, 0, self.width, self.height, GL_DEPTH_COMPONENT, GL_FLOAT, outputType=None)
        rgb = np.flip(np.flip(rgb, 0), 1)
        z = np.flip(np.flip(z, 0), 1)
        return rgb, z
Project: diluvian    Author: aschampion
def augment_subvolume(self):
        subv = self.subvolume
        shape = subv.image.shape[self.axis]
        seed = subv.seed.copy()
        seed[self.axis] = shape - subv.seed[self.axis] - 1
        subv = Subvolume(np.flip(subv.image, self.axis),
                         np.flip(subv.label_mask, self.axis) if subv.label_mask is not None else None,
                         seed,
                         subv.label_id)
        return subv
Project: pytorch_fnet    Author: AllenCellModeling
def flip(images, flips):
    """
    Flips images based on the calculations from get_flips()
    :param images: Either a single n-dimensional image as a numpy array or a list of them.
    The images to flip
    :param flips: The output from get_flips(), tells the function which axes to flip the images along
    All images will be flipped the same way
    :return: Either a single flipped copy of the input image, or a list of them in the same order that they
    were passed in, depending on whether the 'images' parameter was a single picture or a list
    """
    if isinstance(images, (list, tuple)):
        return_list = True
        image_list = images
    else:
        return_list = False
        image_list = [images]
    out = []
    for img in image_list:
        # probably the most I've typed 'flip' in my life
        flipped = img
        for flip_axis in flips:
            flipped = np.flip(flipped, flip_axis)
        out.append(flipped.copy())
    if return_list:
        return out
    else:
        return out[0]
Project: Unet_3D    Author: zhengyang-wang
def flip(inputs, labels, axis):
    '''
    axis : integer. Axis in array, which entries are reversed.
    '''
    return np.flip(inputs, axis), np.flip(labels, axis)
Project: depth-semantic-fully-conv    Author: iapatil
def __call__(self, inputs,target_depth,target_label):
        if random.random() < 0.5:
            inputs = np.flip(inputs,axis=0).copy()
            target_depth = np.flip(target_depth,axis=0).copy()
            target_label = np.flip(target_label,axis=0).copy()
        return inputs,target_depth,target_label
Project: CIKM2017    Author: heliarmk
def agg(file_name,store_file):

    datas = joblib.load(file_name)
    new_datas = []

    for data in datas:
        new_datas.append(data)
        new_datas.append({"input":np.flip(data["input"],axis=2),"label":data["label"]})
        new_datas.append({"input":np.flip(data["input"],axis=3),"label":data["label"]})
        #new_datas.append({"input":np.rot90(m=data["input"],k=1,axes=(2,3)),"label":data["label"]})
        #new_datas.append({"input":np.rot90(m=data["input"],k=2,axes=(2,3)),"label":data["label"]})
        #new_datas.append({"input":np.rot90(m=data["input"],k=3,axes=(2,3)),"label":data["label"]})

    joblib.dump(value=new_datas,filename=store_file,compress=3)
Project: pwr    Author: tjlaboss
def get_radial_power_by_tally(self, state, tally_id, index=None, eps=0):
        """Get the radial power of a specific tally with a known ID

        Parameters:
        -----------
        state:          openmc.StatePoint with this Mesh_Group's tally results
        tally_id:       int; id of the desired openmc.Tally
        index:          int; index of the z-layer within the Tally's mesh.
                        If the index is None, the sum of all the Tally's
                        layers will be returned.
                        [Default: None]
        eps:            float; cells with a value <= eps are set to NaN
                        in the returned array. [Default: 0]

        Returns:
        --------
        xyarray:        numpy.array of the radial power profile
        """
        tally = state.tallies[tally_id]
        talvals = tally.get_values()
        nz = len(talvals)//(self._nx*self._ny)
        talvals.shape = (nz, self._ny, self._nx)
        talvals = np.flip(talvals, 1)
        if index is not None:
            xyarray = talvals[index, :, :]
        else:
            xyarray = np.zeros((self._ny, self._nx))
            for i in range(nz):
                xyarray += talvals[i, :, :]
        xyarray[xyarray <= eps] = np.NaN
        return xyarray
Project: neural-combinatorial-optimization-rl-tensorflow    Author: MichelDeudon
def swap2opt(tsptw_sequence,i,j):
    new_tsptw_sequence = np.copy(tsptw_sequence)
    new_tsptw_sequence[i:j+1] = np.flip(tsptw_sequence[i:j+1], axis=0) # flip or swap ?
    return new_tsptw_sequence

# One step of 2opt  = one double loop and return first improved sequence
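A 2-opt move reverses the visiting order of the cities between positions i and j (inclusive). A toy usage of the function above:

import numpy as np

tour = np.array([0, 1, 2, 3, 4, 5])
new_tour = swap2opt(tour, 1, 3)    # reverses the segment tour[1:4]
# new_tour -> array([0, 3, 2, 1, 4, 5]); the original tour is left unchanged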
Project: bird_classification    Author: halwai
def random_flip_lr(img):
    rand_num = np.random.rand(1)
    if rand_num > 0.5:
        img = np.flip(img, 1)
    return img
Project: sourceseparation_misc    Author: ycemsubakan
def append_zeros_all(fls1, fls2, mode):
    lens1, lens2 = [], []
    for fl1, fl2 in zip(fls1, fls2):
        if mode == 'audio':
            lens1.append(fl1.shape[0]), lens2.append(fl2.shape[0])
        elif mode == 'specs':
            lens1.append(fl1.shape[0]), lens2.append(fl2.shape[0])
        else:
            raise ValueError('Whaaat?')

    inds1, lens1 = list(np.flip(np.argsort(lens1),0)), np.flip(np.sort(lens1),0)
    inds2, lens2 = list(np.flip(np.argsort(lens2),0)), np.flip(np.sort(lens2),0)
    fls1, fls2 = np.array(fls1)[inds1], np.array(fls2)[inds2]
    maxlen = max([max(lens1), max(lens2)])

    mixes = []
    for i, (fl1, fl2) in enumerate(zip(fls1, fls2)):
        if mode == 'audio':
            fls1[i] = np.pad(fl1, (0, maxlen - fl1.shape[0]), 'constant')
            fls2[i] = np.pad(fl2, (0, maxlen - fl2.shape[0]), 'constant')
            mixes.append(fls1[i] + fls2[i])
        elif mode == 'specs':
            fls1[i] = np.pad(fl1, ((0, maxlen - fl1.shape[0]), (0, 0)), 'constant')
            fls2[i] = np.pad(fl2, ((0, maxlen - fl2.shape[0]), (0, 0)), 'constant')
        else:
            raise ValueError('Whaaat?')

    return list(fls1), list(fls2), mixes, lens1, lens2
Project: pylmnn    Author: johny-c
def pca_fit(X, var_ratio=1, return_transform=True):
    """

    Parameters
    ----------
    X : array_like
        An array of data samples with shape (n_samples, n_features).
    var_ratio : float
        The variance ratio to be captured (Default value = 1).
    return_transform : bool
        Whether to apply the transformation to the given data.

    Returns
    -------
    array_like
        If return_transform is True, an array with shape (n_samples, n_components) which is the input samples projected
        onto `n_components` principal components. Otherwise the first `n_components` eigenvectors of the covariance
        matrix corresponding to the `n_components` largest eigenvalues are returned as rows.

    """

    cov_ = np.cov(X, rowvar=False)  # Mean is removed
    evals, evecs = LA.eigh(cov_)  # Get eigenvalues in ascending order, eigenvectors in columns
    evecs = np.fliplr(evecs)  # Flip eigenvectors to get them in descending eigenvalue order

    if var_ratio == 1:
        L = evecs.T
    else:
        evals = np.flip(evals, axis=0)
        var_exp = np.cumsum(evals)
        var_exp = var_exp / var_exp[-1]
        n_components = np.argmax(np.greater_equal(var_exp, var_ratio))
        L = evecs.T[:n_components]  # Set the first n_components eigenvectors as rows of L

    if return_transform:
        return X.dot(L.T)
    else:
        return L
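np.linalg.eigh returns eigenvalues in ascending order, so the flip/fliplr pair above is what puts the components in descending-variance order. A small self-contained check of that ordering (illustration only, not library code):

import numpy as np

np.random.seed(0)
X = np.random.randn(100, 5)
cov = np.cov(X, rowvar=False)
evals, evecs = np.linalg.eigh(cov)

evals_desc = np.flip(evals, axis=0)    # largest eigenvalue first
evecs_desc = np.fliplr(evecs)          # columns reordered to match
assert np.all(np.diff(evals_desc) <= 0)
assert np.allclose(cov.dot(evecs_desc[:, 0]), evals_desc[0] * evecs_desc[:, 0])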
Project: kaggle_dsb    Author: syagev
def _slice_cube(cube):
    """Creates 2D slices from a 3D volume.

    Args:
        cube: a [N x N x N] numpy array

    Returns:
        slices: a [N x N x 9] array of 2D slices
    """

    slices = np.zeros((cube.shape[0], cube.shape[0], 9), dtype=np.float32)

    # axis-aligned
    slices[:,:,0] = cube[np.floor(cube.shape[0] / 2).astype(int), :, :]
    slices[:,:,1] = cube[:, np.floor(cube.shape[0] / 2).astype(int), :]
    slices[:,:,2] = cube[:, :, np.floor(cube.shape[0] / 2).astype(int)]

    # diagonals
    slices[:,:,3] = cube.diagonal(axis1=0, axis2=1)
    slices[:,:,4] = cube.diagonal(axis1=0, axis2=2)
    slices[:,:,5] = cube.diagonal(axis1=1, axis2=2)
    slices[:,:,6] = np.flip(cube, 0).diagonal(axis1=0, axis2=1)
    slices[:,:,7] = np.flip(cube, 0).diagonal(axis1=0, axis2=2)
    slices[:,:,8] = np.flip(cube, 1).diagonal(axis1=1, axis2=2)

    return slices
Project: wradlib    Author: wradlib
def set_raster_origin(data, coords, direction):
    """ Converts Data and Coordinates Origin

    .. versionadded:: 0.10.0

    Parameters
    ----------
    data : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols) or (bands, rows, cols) containing
        the data values.
    coords : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols, 2) containing xy-coordinates.
    direction : str
        'lower' or 'upper', direction in which to convert data and coordinates.

    Returns
    -------
    data : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols) or (bands, rows, cols) containing
        the data values.
    coords : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols, 2) containing xy-coordinates.
    """
    x_sp, y_sp = coords[1, 1] - coords[0, 0]
    origin = ('lower' if y_sp > 0 else 'upper')
    same = (origin == direction)
    if not same:
        data = np.flip(data, axis=-2)
        coords = np.flip(coords, axis=-3)
        # we need to shift y-coordinate if data and coordinates have the same
        # number of rows and cols
        if data.shape[-2:] == coords.shape[:2]:
            coords += [0, y_sp]

    return data, coords
Project: wradlib    Author: wradlib
def test_set_raster_origin(self):
        data, coords = georef.set_raster_origin(self.data.copy(),
                                                self.coords.copy(), 'upper')
        np.testing.assert_array_equal(data, self.data)
        np.testing.assert_array_equal(coords, self.coords)
        data, coords = georef.set_raster_origin(self.data.copy(),
                                                self.coords.copy(), 'lower')
        np.testing.assert_array_equal(data, np.flip(self.data, axis=-2))
        np.testing.assert_array_equal(coords, np.flip(self.coords, axis=-3))
Project: wradlib    Author: wradlib
def get_image(self, header):
        print(header)
        prod = SIGMET_DATA_TYPES[header.get('data_type')]
        x_size = header.get('x_size')
        y_size = header.get('y_size')
        z_size = header.get('z_size')
        cnt = x_size * y_size * z_size
        data = self.read_from_record(cnt, prod['dtype'])
        data = self.decode_data(data, prod=prod)
        data.shape = (z_size, y_size, x_size)
        return np.flip(data, axis=1)
Project: TikZ    Author: ellisk42
def drawTrace(self):
        data = np.zeros((256, 256), dtype=np.uint8)
        surface = cairo.ImageSurface.create_for_data(data,cairo.FORMAT_A8,256,256)
        context = cairo.Context(surface)
        t = [np.zeros((256,256))]
        for l in self.lines:
            l.draw(context)
            t.append(np.flip(data, 0)/255.0)
        return t
Project: bubblesub    Author: rr-
def work(self, task):
        pts = task

        audio_frame = int(pts * self._api.audio.sample_rate / 1000.0)
        first_sample = (
            audio_frame >> DERIVATION_DISTANCE) << DERIVATION_DISTANCE
        sample_count = 2 << DERIVATION_SIZE

        samples = self._api.audio.get_samples(first_sample, sample_count)
        samples = np.mean(samples, axis=1)
        sample_fmt = self._api.audio.sample_format
        if sample_fmt is None:
            return pts, np.zeros((1 << DERIVATION_SIZE) + 1)
        elif sample_fmt == ffms.FFMS_FMT_S16:
            samples /= 32768.
        elif sample_fmt == ffms.FFMS_FMT_S32:
            samples /= 4294967296.
        elif sample_fmt not in (ffms.FFMS_FMT_FLT, ffms.FFMS_FMT_DBL):
            raise RuntimeError('Unknown sample format: {}'.format(sample_fmt))

        self._input[0:len(samples)] = samples
        out = self._fftw()

        scale_factor = 9 / np.sqrt(1 * (1 << DERIVATION_SIZE))
        out = np.log(
            np.sqrt(
                np.real(out) * np.real(out)
                + np.imag(out) * np.imag(out)
            ) * scale_factor + 1)

        out *= 255
        out = np.clip(out, 0, 255)
        out = np.flip(out, axis=0)
        out = out.astype(dtype=np.uint8)
        return pts, out
Project: neural-segmentation    Author: melsner
def getYae(Xae, reverseUtt):
    assert len(Xae.shape) in [3,4], 'Invalid number of dimensions for Xae: %i (must be 3 or 4)' % len(Xae.shape)
    if reverseUtt:
        Yae = np.flip(Xae, 1)
        if len(Xae.shape) == 4:
            Yae = np.flip(Yae, 2)
    else:
        Yae = Xae
    return Yae
Project: deliver    Author: orchestor
def test_axes(self):
        self.assertRaises(ValueError, np.flip, np.ones(4), axis=1)
        self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2)
        self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
Project: deliver    Author: orchestor
def test_basic_lr(self):
        a = get_mat(4)
        b = a[:, ::-1]
        assert_equal(np.flip(a, 1), b)
        a = [[0, 1, 2],
             [3, 4, 5]]
        b = [[2, 1, 0],
             [5, 4, 3]]
        assert_equal(np.flip(a, 1), b)
Project: deliver    Author: orchestor
def test_basic_ud(self):
        a = get_mat(4)
        b = a[::-1, :]
        assert_equal(np.flip(a, 0), b)
        a = [[0, 1, 2],
             [3, 4, 5]]
        b = [[3, 4, 5],
             [0, 1, 2]]
        assert_equal(np.flip(a, 0), b)
Project: deliver    Author: orchestor
def test_3d_swap_axis0(self):
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]])

        b = np.array([[[4, 5],
                       [6, 7]],
                      [[0, 1],
                       [2, 3]]])

        assert_equal(np.flip(a, 0), b)
Project: deliver    Author: orchestor
def test_3d_swap_axis1(self):
        a = np.array([[[0, 1],
                       [2, 3]],
                      [[4, 5],
                       [6, 7]]])

        b = np.array([[[2, 3],
                       [0, 1]],
                      [[6, 7],
                       [4, 5]]])

        assert_equal(np.flip(a, 1), b)
Project: deliver    Author: orchestor
def test_4d(self):
        a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
        for i in range(a.ndim):
            assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))