Python numpy module, rot90() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.rot90().
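
Before the project examples, a minimal usage sketch (not taken from any of the projects below): np.rot90(m, k) rotates an array k times by 90 degrees counter-clockwise, and the optional axes argument (NumPy >= 1.12) selects the plane to rotate in.

import numpy as np

a = np.arange(6).reshape(2, 3)

np.rot90(a)                    # one 90-degree counter-clockwise rotation, shape becomes (3, 2)
np.rot90(a, k=2)               # 180-degree rotation, shape stays (2, 3)
np.rot90(a, k=-1)              # one clockwise rotation

# For stacked images (e.g. an NCHW batch), axes picks the plane to rotate in:
batch = np.zeros((4, 3, 8, 8))
np.rot90(batch, k=1, axes=(2, 3)).shape    # (4, 3, 8, 8)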

Project: watermark | Author: lishuaijuly
def inner_extract(self,B,signature):
        w,h =B.shape[:2]

        LL,(HL,LH,HH) = pywt.dwt2(B[:32*(w//32),:32*(h//32)],'haar') 
        LL_1,(HL_1,LH_1,HH_1) = pywt.dwt2(LL,'haar') 
        LL_2,(HL_2,LH_2,HH_2) = pywt.dwt2(LL_1,'haar') 
        LL_3,(HL_3,LH_3,HH_3) = pywt.dwt2(LL_2,'haar')
        LL_4,(HL_4,LH_4,HH_4) = pywt.dwt2(LL_3,'haar')

        _,_,_,ori_sig = self._gene_embed_space(HH_3)

        ext_sigs=[]
        ext_sigs.extend(self._extract_sig(ori_sig,len(signature)))
        ext_sigs.extend(self._extract_sig(np.rot90(ori_sig,1),len(signature)))
        ext_sigs.extend(self._extract_sig(np.rot90(ori_sig,2),len(signature)))
        ext_sigs.extend(self._extract_sig(np.rot90(ori_sig,3),len(signature)))

        return ext_sigs
Project: semantic-segmentation | Author: albertbuchard
def mk_rotations(img):
    #
    #   DESCRIPTION
    #       This function creates 8 rotated versions of an input image: 4 rotations of the raw image and 4 rotations of its transpose.
    #   
    #   INPUTS
    #       img np.array 
    #       
    #   OUTPUTS
    #       img, img90, img180, img270, imgT, imgT90, imgT180, imgT270
    #
    #
    img90 = np.rot90(img)
    img180 = np.rot90(img,k=2)
    img270 = np.rot90(img,k=3)
    imgT = np.zeros(img.shape)
    if np.size(img.shape)>2:
        for i in range(3):
            imgT[:,:,i] =img[:,:,i].T
    else:
        imgT = img.T
    imgT90 = np.rot90(imgT)
    imgT180 = np.rot90(imgT, k=2)
    imgT270 = np.rot90(imgT, k=3)
    return img, img90, img180, img270, imgT, imgT90, imgT180,imgT270
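
A note on the transpose branch above (not from the project): np.zeros(img.shape) only has the right shape for the transposed copy when the image is square, and the per-channel loop can be replaced by a single axis swap. A minimal sketch, assuming img is HxW or HxWxC:

imgT = img.swapaxes(0, 1)    # equals img.T for 2-D input; swaps height and width and keeps the channel axis for HxWxC
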
Project: semantic-segmentation | Author: albertbuchard
def unrotate(rot0, rot1, rot2, rot3, rot4, rot5, rot6, rot7):
    #
    #   DESCRIPTION 
    #       Function that merges the 8 mapped images, as described at the beginning of the file, back into the original format
    #       Uses an element-wise product
    #
    #
    unrot = np.copy(rot0)
    unrot*=np.rot90((rot1),k=3)
    unrot*=np.rot90((rot2),k=2)
    unrot*=np.rot90((rot3),k=1) 

    unrot*=(rot4.T)
    unrot*=np.rot90((rot5),k=3).T
    unrot*=np.rot90((rot6),k=2).T 
    unrot*=np.rot90((rot7),k=1).T 

    return unrot

##                      ##
##                      ##
##      EXECUTION       ##
##                      ##
##                      ##
Project: semantic-segmentation | Author: albertbuchard
def mk_rotations(img):
    ##INPUT:
    ##  img: a 3D RGB array
    ##OUTPUT
    ##  8 rotated and transposed versions of img

    img90 = np.rot90(img)
    img180 = np.rot90(img,k=2)
    img270 = np.rot90(img,k=3)
    imgT = np.zeros(img.shape)
    if np.size(img.shape)>2:
        for i in range(3):
            imgT[:,:,i] =img[:,:,i].T
    else:
        imgT = img.T
    imgT90 = np.rot90(imgT)
    imgT180 = np.rot90(imgT, k=2)
    imgT270 = np.rot90(imgT, k=3)
    return img, img90, img180, img270, imgT, imgT90, imgT180,imgT270

## Formats an image into the format used for saving
Project: BirdCLEF2017 | Author: kahst
def getMagSpec(sig, rate, winlen, winstep, NFFT):

    #get frames
    winfunc = lambda x:np.ones((x,))
    frames = psf.sigproc.framesig(sig, winlen*rate, winstep*rate, winfunc)        

    #Magnitude Spectrogram
    magspec = np.rot90(psf.sigproc.magspec(frames, NFFT))

    return magspec

#Split signal into five-second chunks with overlap of 4 and minimum length of 3 seconds
#Use these settings for other chunk lengths:
#winlen, winstep, seconds
#0.05, 0.0097, 5s
#0.05, 0.0195, 10s
#0.05, 0.0585, 30s
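
A hedged usage sketch for getMagSpec above, using the 5-second row of the table; the sample rate, test signal and NFFT value are assumptions, not taken from the project (python_speech_features is assumed to be imported as psf, as in the function itself):

import numpy as np

rate = 44100
sig = np.random.randn(rate * 5).astype(np.float32)                       # 5 s of noise as a stand-in signal
magspec = getMagSpec(sig, rate, winlen=0.05, winstep=0.0097, NFFT=1024)
print(magspec.shape)                                                     # (frequency bins, frames) after the rot90
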
Project: BirdCLEF2017 | Author: kahst
def getMagSpec(sig, rate, winlen, winstep, NFFT):

    #get frames
    winfunc = lambda x:np.ones((x,))
    frames = psf.sigproc.framesig(sig, winlen*rate, winstep*rate, winfunc)        

    #Magnitude Spectrogram
    magspec = np.rot90(psf.sigproc.magspec(frames, NFFT))

    return magspec

#Split signal into five-second chunks with overlap of 4 and minimum length of 1 second
#Use these settings for other chunk lengths:
#winlen, winstep, seconds
#0.05, 0.0097, 5s
#0.05, 0.0195, 10s
#0.05, 0.0585, 30s
Project: xdesign | Author: tomography
def test_discrete_phantom_uniform():
    """The uniform discrete phantom is the same after rotating 90 degrees."""

    d0 = discrete_phantom(p, 100, ratio=10, prop='mass_atten')

    p.rotate(theta=np.pi/2, point=Point([0.5, 0.5]))
    d1 = np.rot90(discrete_phantom(p, 100, ratio=10, prop='mass_atten'))

    # plot rotated phantom
    plot_phantom(p)

    # plot the error
    plt.figure()
    plt.imshow(d1-d0, interpolation=None)
    plt.colorbar()

    # plt.show(block=True)
    # assert_allclose(d0, d1)
Project: sealionengine | Author: gecrooks
def load_dotted_image(self, train_id, scale=1, border=0, circled=False):
        img = self._load_image('dotted', train_id, scale, border)

        if train_id in self.extra_masks:
            for row, col, radius in self.extra_masks[train_id]:
                rr, cc = skimage.draw.circle(row, col, radius, shape = img.shape)
                img = np.copy(img)
                img[rr, cc] = (0, 0, 0)

        # When dotted image is rotated relative to train, apply hot patch. (kudos: @authman)
        if train_id in self.dotted_rotate:
            rot = self.dotted_rotate[train_id]
            img = np.rot90(img, rot)

        if circled: 
            assert scale == 1
            assert border == 0
            img = np.copy(img)
            img = self.draw_circles(np.copy(img), self.tid_coords[train_id])        

        return img
Project: pytrip | Author: pytrip
def get_lateral(self, resolution=0.5):
        if hasattr(self, "lateral") and self.resolution == resolution:
            # return the cached lateral distribution if it was computed at this resolution
            return self.lateral
        max_dist = self.get_max_dist()
        dim = int(np.ceil(np.absolute(max_dist / resolution)))  # int so it can be used for array sizes and slicing
        max_dist = dim * resolution
        self.resolution = resolution
        a = np.meshgrid(np.linspace(0, max_dist, dim), np.linspace(0, max_dist, dim))
        r = (a[0]**2 + a[1]**2)**0.5
        sigma = self.sigma / ((8 * log(2))**0.5)
        lateral = 1 / ((2 * pi * sigma**2)**0.5) * np.exp(-(r**2) / (2 * sigma**2))
        tot_lat = np.zeros((2 * dim - 1, 2 * dim - 1))

        tot_lat[dim - 1:2 * dim - 1, dim - 1:2 * dim - 1] = lateral
        tot_lat[dim - 1:2 * dim - 1, 0:dim - 1] = np.rot90(lateral, 3)[:, 0:dim - 1]
        tot_lat[0:dim - 1, 0:dim - 1] = np.rot90(lateral, 2)[0:dim - 1, 0:dim - 1]
        tot_lat[0:dim - 1, dim - 1:2 * dim - 1] = np.rot90(lateral)[0:dim - 1, :]

        self.lateral = tot_lat
        return self.lateral
Project: MDT | Author: cbclab
def _apply_transformations(plot_config, data_slice):
    """Rotate, flip and zoom the data slice.

    Depending on the plot configuration, this will apply some transformations to the given data slice.

    Args:
        plot_config (mdt.visualization.maps.base.MapPlotConfig): the plot configuration
        data_slice (ndarray): the 2d slice of data to transform

    Returns:
        ndarray: the transformed 2d slice of data
    """
    if plot_config.rotate:
        data_slice = np.rot90(data_slice, plot_config.rotate // 90)

    if not plot_config.flipud:
        # by default we flipud to correct for matplotlib's lower origin. If the user
        # sets flipud, we do not need to do it
        data_slice = np.flipud(data_slice)

    data_slice = plot_config.zoom.apply(data_slice)
    return data_slice
Project: pytorch_fnet | Author: AllenCellModeling
def _augment_chunks(self, chunks):
        if self.choices_augmentation is None:
            return chunks
        chunks_new = []
        choice = np.random.choice(self.choices_augmentation)
        for chunk in chunks:
            chunk_new = chunk
            if choice in [1, 3, 5, 7]:
                chunk_new = np.flip(chunk_new, axis=1)
            if   choice in [2, 3]:
                chunk_new = np.rot90(chunk_new, 1, axes=(1, 2))
            elif choice in [4, 5]:
                chunk_new = np.rot90(chunk_new, 2, axes=(1, 2))
            elif choice in [6, 7]:
                chunk_new = np.rot90(chunk_new, 3, axes=(1, 2))
            chunks_new.append(chunk_new)
        return chunks_new
Project: KAGGLE_CERVICAL_CANCER_2017 | Author: ZFTurbo
def random_augment_image(image, row):
    # start0_max, end0_max, start1_max, end1_max = get_bounding_boxes_positions(image, row)
    # image = cv2.rectangle(image, (int(start1_max), int(start0_max)), (int(end1_max), int(end0_max)), (0, 0, 255), thickness=5)
    if random.randint(0, 1) == 0:
        image = return_random_crop(image, row)
    else:
        image = return_random_perspective(image, row)
    image = random_rotate(image)

    # all possible mirroring and flips (in total there are only 8 possible configurations)
    mirror = random.randint(0, 1)
    if mirror != 0:
        image = image[::-1, :, :]
    angle = random.randint(0, 3)
    if angle != 0:
        image = np.rot90(image, k=angle)

    image = lightning_change(image)
    image = blur_image(image)

    return image
Project: solving-minesweeper-by-tensorflow | Author: staytime
def rot(self):
        for i in range(len(self.Q)):
            Q       = self.Q[i]
            ans     = self.ans[i]
            refArea = self.area[i]
            refMask = self.mask[i]

            for rotate in range(3):
                self.Q.append(Q)
                self.ans.append(ans)

                refArea = np.rot90(refArea)
                refMask = np.rot90(refMask)

                self.area.append(refArea)
                self.mask.append(refMask)

        return self
Project: imgProcessor | Author: radjkarl
def rot90(img):
    '''
    rotate one or multiple grayscale or color images 90 degrees
    '''
    s = img.shape
    if len(s) == 3:
        if s[2] in (3, 4):  # color image
            out = np.empty((s[1], s[0], s[2]), dtype=img.dtype)
            for i in range(s[2]):
                out[:, :, i] = np.rot90(img[:, :, i])
        else:  # multiple grayscale
            out = np.empty((s[0], s[2], s[1]), dtype=img.dtype)
            for i in range(s[0]):
                out[i] = np.rot90(img[i])
    elif len(s) == 2:  # one grayscale
        out = np.rot90(img)
    elif len(s) == 4 and s[3] in (3, 4):  # multiple color
        out = np.empty((s[0], s[2], s[1], s[3]), dtype=img.dtype)
        for i in range(s[0]):  # for each img
            for j in range(s[3]):  # for each channel
                out[i, :, :, j] = np.rot90(img[i, :, :, j])
    else:
        raise NotImplementedError()
    return out
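
Since NumPy 1.12, np.rot90 also accepts an axes argument, so the per-channel and per-image loops above can usually be collapsed into a single call. A minimal sketch under the same shape conventions as the function above:

out = np.rot90(img, axes=(0, 1))    # single color image, H x W x C
out = np.rot90(img, axes=(1, 2))    # stack of images, N x H x W or N x H x W x C
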
Project: NBAapi | Author: eyalshafran
def shot_heatmap(df,sigma = 1,log=False,player_pic=True,ax=None,cmap='jet'):
    '''
    This function plots a heatmap based on the shot chart.
    input - dataframe with x and y coordinates.
    optional - log (default false) plots heatmap in log scale. 
               player_pic (default True) adds the player's picture if true
               sigma - the sigma of the Gaussian kernel. In feet (default=1)
    '''
    n,_,_ = np.histogram2d( 0.1*df['LOC_X'].values, 0.1*df['LOC_Y'].values,bins = [500, 500],range = [[-25,25],[-5.25,44.75]])
    KDE = ndimage.filters.gaussian_filter(n,10.0*sigma)
    N = 1.0*KDE/np.sum(KDE)
    if ax is None:
        ax = plt.gca(xlim = [30,-30],ylim = [-7,43],xticks=[],yticks=[],aspect=1.0)
    court(ax,outer_lines=True,color='black',lw=2.0,direction='down')
    ax.axis('off')
    if log:
        ax.imshow(np.rot90(np.log10(N+1)),cmap=cmap,extent=[25.0, -25.0, -5.25, 44.75])
    else:
        ax.imshow(np.rot90(N),cmap=cmap,extent=[25.0, -25.0, -5.25, 44.75])
    if player_pic:
        player_id = df.PLAYER_ID.values[0]
        pic = players_picture(player_id)
        ax.imshow(pic,extent=[15,25,30,37.8261])
    ax.text(0,-7,'By: Doingthedishes',color='white',horizontalalignment='center',fontsize=20,fontweight='bold')
Project: pershing | Author: qmn
def rot90(self, turns=1):
        """
        Rotates the blocks in the counter-clockwise direction. (As numpy
        does it.)
        """
        # Rotate the individual Y-layer matrices
        new_blocks = np.array([np.rot90(by, turns) for by in self.blocks])
        new_data = np.array([np.rot90(dy, turns) for dy in self.data])
        new_mask = np.array([np.rot90(my, turns) for my in self.mask])

        # Rotate the data (if applicable)
        for y in xrange(new_data.shape[0]):
            for z in xrange(new_data.shape[1]):
                for x in xrange(new_data.shape[2]):
                    b = new_blocks[y, z, x]
                    d = new_data[y, z, x]
                    new_data[y, z, x] = self.data_rot90(b, d, turns)

        return MaskedSubChunk(new_blocks, new_data, new_mask)
Project: pershing | Author: qmn
def data_rot90(self, block, data, turns):
        """
        Specially rotate this block, which has an orientation that depends on
        the data value.
        """
        blockname = blocks.block_names[block]

        # Torches (redstone and normal)
        torches = ["redstone_torch", "unlit_redstone_torch", "torch"]
        if blockname in torches:
            return blocks.Torch.rot90(data, turns)

        # Repeaters
        repeaters = ["unpowered_repeater", "powered_repeater"]
        if blockname in repeaters:
            return blocks.Repeater.rot90(data, turns)

        # Comparators
        comparators = ["unpowered_comparator", "powered_comparator"]
        if blockname in comparators:
            return blocks.Comparator.rot90(data, turns)

        return data
Project: bates_galaxies_lab | Author: aleksds
def _display_pixels(x, y, counts, pixelsize):
    """
    Display pixels at coordinates (x, y) coloured with "counts".
    This routine is fast but not fully general, as it assumes the spaxels
    are on a regular grid. This need not be the case for Voronoi binning.

    """
    xmin, xmax = np.min(x), np.max(x)
    ymin, ymax = np.min(y), np.max(y)
    nx = int(round((xmax - xmin)/pixelsize) + 1)
    ny = int(round((ymax - ymin)/pixelsize) + 1)
    img = np.full((nx, ny), np.nan)  # use nan for missing data
    j = np.round((x - xmin)/pixelsize).astype(int)
    k = np.round((y - ymin)/pixelsize).astype(int)
    img[j, k] = counts

    plt.imshow(np.rot90(img), interpolation='nearest', cmap='prism',
               extent=[xmin - pixelsize/2, xmax + pixelsize/2,
                       ymin - pixelsize/2, ymax + pixelsize/2])

#----------------------------------------------------------------------
Project: sg-mcmc-survey | Author: delta2323
def visualize2D(fig, ax, xs, ys, bins=200,
                xlabel='x', ylabel='y',
                xlim=None, ylim=None):
    H, xedges, yedges = numpy.histogram2d(xs, ys, bins)
    H = numpy.rot90(H)
    H = numpy.flipud(H)
    Hmasked = numpy.ma.masked_where(H == 0, H)

    ax.pcolormesh(xedges, yedges, Hmasked)

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    if xlim is None:
        xlim = (min(xs), max(xs))
    if ylim is None:
        ylim = (min(ys), max(ys))
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    fig.colorbar(pyplot.contourf(Hmasked))
Project: hamaa | Author: monitor1379
def rot180(images):
    """
    Rotate images by 180 degrees.
    Supports images in HW, CHW, or NCHW format.
    """
    out = np.empty(shape=images.shape, dtype=images.dtype)
    if images.ndim == 2:
        out = np.rot90(images, k=2)
    elif images.ndim == 3:
        for c in xrange(images.shape[0]):
            out[c] = np.rot90(images[c], k=2)
    elif images.ndim == 4:
        for n in xrange(images.shape[0]):
            for c in xrange(images.shape[1]):
                out[n][c] = np.rot90(images[n][c], k=2)
    else:
        raise Exception('Invalid ndim: ' + str(images.ndim) +
                        ', only support ndim between 2 and 4.')
    return out
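
A 180-degree rotation is just a reversal of both spatial axes, so the ndim branching above can also be written as a single slicing expression that covers HW, CHW and NCHW inputs alike; a minimal sketch, not from the project:

out = images[..., ::-1, ::-1]    # same result as np.rot90 with k=2 applied to the last two axes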


# Computes the gradient df of function f at the point x
Project: computer-vision-algorithms | Author: aleju
def grad_magnitude(img):
    """Calculate the gradient magnitude of an image.
    Args:
        img The image
    Returns:
        gradient image"""
    img = img / 255.0
    sobel_y = np.array([
        [-1, -2, -1],
        [0, 0, 0],
        [1, 2, 1]
    ])
    sobel_x = np.rot90(sobel_y) # rotates counter-clockwise

    # apply x/y sobel filter to get x/y gradients
    imgx = signal.correlate(img, sobel_x, mode="same")
    imgy = signal.correlate(img, sobel_y, mode="same")
    imgmag = np.sqrt(imgx**2 + imgy**2)

    return imgmag
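
For reference, this is what the counter-clockwise rotation of the Sobel kernel produces (a worked example, not part of the project code):

sobel_y = np.array([[-1, -2, -1],
                    [ 0,  0,  0],
                    [ 1,  2,  1]])
np.rot90(sobel_y)
# array([[-1,  0,  1],
#        [-2,  0,  2],
#        [-1,  0,  1]])
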
Project: computer-vision-algorithms | Author: aleju
def main():
    """Load image, apply filter, plot."""
    img = data.camera()

    # just like sobel, but no -2/+2 in the middle
    prewitt_y = np.array([
        [-1, -1, -1],
        [0, 0, 0],
        [1, 1, 1]
    ])
    prewitt_x = np.rot90(prewitt_y) # rotates counter-clockwise

    img_sx = signal.correlate(img, prewitt_x, mode="same")
    img_sy = signal.correlate(img, prewitt_y, mode="same")
    g_magnitude = np.sqrt(img_sx**2 + img_sy**2)

    ground_truth = skifilters.prewitt(data.camera())

    util.plot_images_grayscale(
        [img, img_sx, img_sy, g_magnitude, ground_truth],
        ["Image", "Prewitt (x)", "Prewitt (y)", "Prewitt (both/magnitude)",
         "Prewitt (Ground Truth)"]
    )
Project: camelyon-segmentation | Author: erenhalici
def augment_image(self, image, i):
    if i == 0:
      return np.rot90(image)
    elif i == 1:
      return np.rot90(image,2)
    elif i == 2:
      return np.rot90(image,3)
    elif i == 3:
      return image
    elif i == 4:
      return np.fliplr(image)
    elif i == 5:
      return np.flipud(image)
    elif i == 6:
      return image.transpose(1,0,2)
    elif i == 7:
      return np.fliplr(np.rot90(image))
Project: camelyon-segmentation | Author: erenhalici
def augment_image(self, image, i):
    if i == 0:
      return np.rot90(image)
    elif i == 1:
      return np.rot90(image,2)
    elif i == 2:
      return np.rot90(image,3)
    elif i == 3:
      return image
    elif i == 4:
      return np.fliplr(image)
    elif i == 5:
      return np.flipud(image)
    elif i == 6:
      return image.transpose(1,0,2)
    elif i == 7:
      return np.fliplr(np.rot90(image))
Project: empymod | Author: empymod
def test_get_geo_fact():
    res = np.array([0.017051023225738, 0.020779123804907, -0.11077204227395,
                    -0.081155809427821, -0.098900024313067, 0.527229048585517,
                    -0.124497144079623, -0.151717673241039, 0.808796206796408])
    res2 = np.rot90(np.fliplr(res.reshape(3, -1))).ravel()

    # EE, MM
    ab = [11, 12, 13, 21, 22, 23, 31, 32, 33]
    i = 0
    for i in range(9):
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, False, False)
        assert_allclose(out[0], res[i])
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, True, True)
        assert_allclose(out[0], res[i])
        i += 1

    # ME, EM
    ab = [14, 15, 16, 24, 25, 26, 34, 35, 36]
    i = 0
    for i in range(9):
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, False, True)
        assert_allclose(out[0], res2[i])
        out = utils.get_geo_fact(ab[i], 13.45, 23.8, 124.3, 5.3, True, False)
        assert_allclose(out[0], res[i])
        i += 1
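
The fliplr-then-rot90 combination used for res2 above is equivalent to a plain transpose of the reshaped 3x3 factor matrix; a minimal identity-check sketch, not part of the test:

A = np.arange(9).reshape(3, 3)
assert np.array_equal(np.rot90(np.fliplr(A)), A.T)
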
Project: hco-experiments | Author: zooniverse
def rotate_examples(X, y, files, extent, k=3):
    m,n = np.shape(X)
    augmentedX = np.ones(((k+1)*m,n))
    augmentedy = np.squeeze(np.ones(((k+1)*m,)))
    augmented_files = []
    for i in range(m):
        #print y[i]
        print((k+1)*i)
        augmentedX[(k+1)*i,:] *= X[i,:]
        augmentedy[(k+1)*i] *= y[i]
        #print augmentedy[(k+1)*i]
        augmented_files.append(files[i])
        for j in range(1,k+1):
            print(((k+1)*i)+j)
            rotatedX = np.rot90(np.reshape(X[i,:], (2*extent,2*extent), order="F"), j)
            augmentedX[((k+1)*i)+j,:] *= np.ravel(rotatedX, order="F")
            augmentedy[((k+1)*i)+j] *= y[i]
            augmented_files.append(files[i])
            #print augmentedX[:16,:2]
    #print np.shape(augmentedX)
    #print len(augmented_files)
    return augmentedX, augmentedy, augmented_files
Project: hco-experiments | Author: zooniverse
def rotate_examples(X, y, files, extent, k=3):
    m,n = np.shape(X)
    augmentedX = np.ones(((k+1)*m,n))
    augmentedy = np.squeeze(np.ones(((k+1)*m,)))
    augmented_files = []
    for i in range(m):
        #print y[i]
        print (k+1)*i
        augmentedX[(k+1)*i,:] *= X[i,:]
        augmentedy[(k+1)*i] *= y[i]
        #print augmentedy[(k+1)*i]
        augmented_files.append(files[i])
        for j in range(1,k+1):
            print ((k+1)*i)+j
            rotatedX = np.rot90(np.reshape(X[i,:], (2*extent,2*extent), order="F"), j)
            augmentedX[((k+1)*i)+j,:] *= np.ravel(rotatedX, order="F")
            augmentedy[((k+1)*i)+j] *= y[i]
            augmented_files.append(files[i])
            #print augmentedX[:16,:2]
    #print np.shape(augmentedX)
    #print len(augmented_files)
    return augmentedX, augmentedy, augmented_files
Project: niworkflows | Author: poldracklab
def transform_to_2d(data, max_axis):
    """
    Projects 3d data cube along one axis using maximum intensity with
    preservation of the signs. Adapted from nilearn.
    """
    import numpy as np
    # get the shape of the array we are projecting to
    new_shape = list(data.shape)
    del new_shape[max_axis]

    # generate a 3D indexing array that points to max abs value in the
    # current projection
    a1, a2 = np.indices(new_shape)
    inds = [a1, a2]
    inds.insert(max_axis, np.abs(data).argmax(axis=max_axis))

    # take the values where the absolute value of the projection
    # is the highest
    maximum_intensity_data = data[inds]

    return np.rot90(maximum_intensity_data)
Project: rascal-tensorflow | Author: stayrascal
def bp_sensitivity_map(self, sensitivity_array, activator):
        expanded_array = self.expand_sensitivity_map(sensitivity_array)
        expanded_width = expanded_array.shape[2]
        zp = (self.input_width + self.filter_width - 1 - expanded_width) / 2
        padded_array = padding(expanded_array, zp)
        self.delta_array = self.create_delta_array()
        for f in range(self.filter_number):
            filter = self.filters[f]
            flipped_weights = np.array(map(lambda i: np.rot90(i, 2), filter.get_weights()))
            delta_array = self.create_delta_array()
            for d in range(delta_array.shape[0]):
                conv(padded_array[f], flipped_weights[d], delta_array[d], 1, 0)
            self.delta_array += delta_array
        derivative_array = np.array(self.input_array)
        element_wise_op(derivative_array, activator.backward)
        self.delta_array *= derivative_array
Project: bnpy | Author: bnpy
def makeImgPatchPrototype(D, compID):
    ''' Create image patch prototype for specific component
        Returns
        --------
        Xprototype : sqrt(D) x sqrt(D) matrix
    '''
    # Create a "prototype" image patch of PxP pixels
    P = np.sqrt(D)
    Xprototype = np.zeros((P, P))
    if compID % 4 == 0:
        Xprototype[:P / 2] = 1.0
        Xprototype = np.rot90(Xprototype, compID / 4)
    if compID % 4 == 1:
        Xprototype[np.tril_indices(P)] = 1
        Xprototype = np.rot90(Xprototype, (compID - 1) / 4)
    if compID % 4 == 2:
        Xprototype[np.tril_indices(P, 2)] = 1
        Xprototype = np.rot90(Xprototype, (compID - 2) / 4)
    if compID % 4 == 3:
        Xprototype[np.tril_indices(P, -2)] = 1
        Xprototype = np.rot90(Xprototype, (compID - 3) / 4)
    return Xprototype
Project: Robo-Plot | Author: JackBuck
def take_photo_at(self, camera_centre):
        with picamera.PiCamera() as camera:
            camera.resolution = config.CAMERA_RESOLUTION
            camera.framerate = 24
            with picamera.array.PiRGBArray(camera) as output:
                camera.capture(output, 'bgr', use_video_port=True)
                outputarray = output.array

            # Rotate image to orient it with the paper.
            outputarray = np.rot90(outputarray, 3)

            # Save photo.
            filename = datetime.datetime.now().strftime("%M%S.%f_") + \
                       str(camera_centre[0]) \
                       + '_' \
                       + str(camera_centre[1]) + '_Photo_' + str(self._photo_index) + '.jpg'

            cv2.imwrite(os.path.join(config.debug_output_folder, filename), outputarray)
            self._photo_index += 1

            return outputarray
Project: DRIP-SLIP | Author: NASA-DEVELOP
def sum48(newFile,extent):
    currentSum=loadtxt(os.path.join(getCurrentDirectory(),'Sum48','sum48.txt'),dtype='float',delimiter=',')
    historicFiles=sorted(glob.glob(os.path.join(getCurrentDirectory(),'Sum48','Files','*txt')))
    lastFile=loadtxt(os.path.join(getCurrentDirectory(),'Sum48','Files',historicFiles[0]),dtype='float',delimiter=',')
    currentSum=currentSum-lastFile
    currentSum=currentSum+newFile
    np.savetxt(os.path.join(getCurrentDirectory(),'Sum48','sum48.txt'),currentSum,delimiter=',')
    rotatedSum = np.rot90(currentSum)
    tiffFiles=glob.glob(os.path.join(getCurrentDirectory(),'Sum48','Tiffs','*.TIF'))
    if not tiffFiles:
        lastTifNum='1'
    else:
        tiffFiles=natsorted(tiffFiles,alg=ns.IC)
        lastTif=tiffFiles[-1]
        lastTifNum=str(int(lastTif[lastTif.rfind('_')+1:lastTif.rfind('.')])+1)
    array2raster(os.path.join(getCurrentDirectory(),'Sum48','Tiffs',timeStr[-11:-7]) + '_48HourSum_' + lastTifNum + '.TIF',[extent[0],extent[3]],extent[4],extent[5],rotatedSum,gdalconst.GDT_Float32)
    while len(tiffFiles)>48:
        os.remove(tiffFiles[0])
        tiffFiles=natsorted(glob.glob(os.path.join(getCurrentDirectory(),'Sum48','Tiffs','*.TIF')),alg=ns.IC)
    os.remove(historicFiles[0])

#sums the past 72 hours of rainfall, sends an email if exceeds threshold
Project: DRIP-SLIP | Author: NASA-DEVELOP
def sum72(newFile,extent):
    currentSum=loadtxt(os.path.join(getCurrentDirectory(),'Sum72','sum72.txt'),dtype='float',delimiter=',')
    historicFiles=sorted(glob.glob(os.path.join(getCurrentDirectory(),'Sum72','Files','*txt')))
    lastFile=loadtxt(os.path.join(getCurrentDirectory(),'Sum72','Files',historicFiles[0]),dtype='float',delimiter=',')
    currentSum=currentSum-lastFile
    currentSum=currentSum+newFile
    np.savetxt(os.path.join(getCurrentDirectory(),'Sum72','sum72.txt'),currentSum,delimiter=',')
    rotatedSum = np.rot90(currentSum)
    tiffFiles=glob.glob(os.path.join(getCurrentDirectory(),'Sum72','Tiffs','*.TIF'))
    if not tiffFiles:
        lastTifNum='1'
    else:
        tiffFiles=natsorted(tiffFiles,alg=ns.IC)
        lastTif=tiffFiles[-1]
        lastTifNum=str(int(lastTif[lastTif.rfind('_')+1:lastTif.rfind('.')])+1)
    array2raster(os.path.join(getCurrentDirectory(),'Sum72','Tiffs',timeStr[-11:-7]) + '_72HourSum_' + lastTifNum + '.TIF',[extent[0],extent[3]],extent[4],extent[5],rotatedSum,gdalconst.GDT_Float32)
    while len(tiffFiles)>48:
        os.remove(tiffFiles[0])
        tiffFiles=natsorted(glob.glob(os.path.join(getCurrentDirectory(),'Sum72','Tiffs','*.TIF')),alg=ns.IC)
    os.remove(historicFiles[0])

#sends an e-mail containing "attachment", currently to the authors
Project: RocAlphaGo | Author: Rochester-NRT
def symmetries(self):
        """returns a list of 8 GameState objects:
        all reflections and rotations of the current board

        does not check for duplicates
        """
        copies = [self.copy() for i in range(8)]
        # copies[0] is the original.
        # rotate CCW 90
        copies[1].board = np.rot90(self.board,1)
        # rotate 180
        copies[2].board = np.rot90(self.board,2)
        # rotate CCW 270
        copies[3].board = np.rot90(self.board,3)
        # mirror left-right
        copies[4].board = np.fliplr(self.board)
        # mirror up-down
        copies[5].board = np.flipud(self.board)
        # mirror \ diagonal
        copies[6].board = np.transpose(self.board)
        # mirror / diagonal (equivalently: rotate 90 CCW then flip LR)
        copies[7].board = np.fliplr(copies[1].board)
        return copies
Project: go-NN | Author: TheDuck314
def pick_move(self, color):
        if not self.opponent_passed and self.last_opponent_play:
            mirror_x = self.board.N - self.last_opponent_play[0] - 1
            mirror_y = self.board.N - self.last_opponent_play[1] - 1
            if self.board.play_is_legal(mirror_x, mirror_y, color):
                return (mirror_x, mirror_y)

        enemy_stones = (self.board.vertices == flipped_color[color])
        our_stones = (self.board.vertices == color)
        rot_enemy_stones = np.rot90(enemy_stones, 2)

        play_vertices = np.logical_and(rot_enemy_stones, np.logical_not(our_stones))
        play_vertices =  np.logical_and(play_vertices, np.logical_not(enemy_stones))

        for x in xrange(self.board.N):
            for y in xrange(self.board.N):
                if play_vertices[x,y] and self.board.play_is_legal(x, y, color):
                    return (x,y)

        center = (self.board.N/2, self.board.N/2)
        if self.board[center] == Color.Empty and self.board.play_is_legal(center[0], center[1], color):
            return center

        return None
Project: pyku | Author: dubvulture
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos / 9
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
Project: pyku | Author: dubvulture
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos / 9
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
Project: NumpyDL | Author: oujago
def saveImage(inputImage, name):
    # red = inputImage[:1024]
    # green = inputImage[1024:2048]
    # blue = inputImage[2048:]
    # formatted = np.zeros([3,32,32])
    # formatted[0] = np.reshape(red,[32,32])
    # formatted[1] = np.reshape(green,[32,32])
    # formatted[2] = np.reshape(blue,[32,32])
    # final = np.swapaxes(formatted,0,2)/255
    final = inputImage
    final = np.rot90(np.rot90(np.rot90(final)))
    imsave(name, final)
Project: marblecutter | Author: mojodna
def apply_latitude_adjustments(pixels):
    data, (bounds, crs), _ = pixels
    (_, height, width) = data.shape

    ys = np.interp(np.arange(height), [0, height - 1], [bounds[3], bounds[1]])
    xs = np.empty_like(ys)
    xs.fill(bounds[0])

    longitudes, latitudes = warp.transform(crs, WGS84_CRS, xs, ys)

    factors = 1 / np.cos(np.radians(latitudes))

    # convert to 2d array, rotate 270º, scale data
    return PixelCollection(data * np.rot90(np.atleast_2d(factors), 3),
                           pixels.bounds)
Project: watermark | Author: lishuaijuly
def extract(self,ori_wmimage,wm, key=None):
        '''
            Extract the LSB watermark.
        '''
        # If the watermarked image is RGB, keep only the first channel
        if len(ori_wmimage.shape)==3:
            wmimage = ori_wmimage[:,:,0]
        else:
            wmimage = ori_wmimage

        # Generate the signature from the watermark and key
        signature = self._gene_signature(wm,key).reshape((16,16))  

        # Extract the candidate signatures
        ext_sigs = self.ext_sig(wmimage,size=16)
        #ext_sigs.extend(self.ext_sig(np.rot90(wmimage,1)))
        #ext_sigs.extend(self.ext_sig(np.rot90(wmimage,2)))
        #ext_sigs.extend(self.ext_sig(np.rot90(wmimage,3)))

        # Compute the best similarity over the candidates
        similarity = 0 
        for sig in ext_sigs:
            print(sig)
            print(signature)
            one_similarity = list(np.array(sig.flatten()) - signature.flatten()).count(0) / len(signature.flatten())
            #logging.info('similarity: {}'.format(one_similarity))
            similarity = max(similarity,one_similarity )
            break

        logging.debug('Similarity between the extracted and original watermark: %f (1 means identical, 0 means unrelated; values above about 0.7 indicate a match)' % (similarity))

        return similarity
Project: watermark | Author: lishuaijuly
def inner_extract(self,B,signature):
        sig_size=np.int(np.sqrt(len(signature)))
        size = self.size

        ext_sigs =[] 
        # The signature was embedded at four positions (the four corners of the block),
        # so try to extract it from each of the following candidate positions:
        #  (0,0)    (0,w-32)
        #  (h-32,0)    (h-32,w-32)
        w ,h = B.shape
        embed_pos =[(0,0)]
        embed_pos.append((w-sig_size*size,0))
        embed_pos.append((0,h-sig_size*size))
        embed_pos.append((w-sig_size*size,h-sig_size*size))

        for x,y in embed_pos:
            ext_sig = np.zeros(len(signature),dtype=np.int)

            for i in range(x,x+sig_size*size,size):
                for j in range(y,y+sig_size * size,size):
                    v = cv2.dct(np.float32(B[i:i+size,j:j+size]))
                    if v[size-1,size-1] > self.Q/2:
                        ext_sig[((i-x)//size)*sig_size+(j-y)//size] = 1 


            ext_sigs.append(ext_sig)
            ext_sig_arr = np.array(ext_sig).reshape((sig_size,sig_size))
            ext_sigs.append(np.rot90(ext_sig_arr,1).flatten())
            ext_sigs.append(np.rot90(ext_sig_arr,2).flatten())
            ext_sigs.append(np.rot90(ext_sig_arr,3).flatten())

        return ext_sigs
Project: train-DeepLab | Author: martinkersner
def convert_segmentation_mat2numpy(mat_file):
  np_segm = load_mat(mat_file)
  return np.rot90(np.fliplr(np.argmax(np_segm, axis=2)))
Project: train-DeepLab | Author: martinkersner
def load_binary_segmentation(bin_file, dtype='int16'):
  with open(bin_file, 'rb') as bf:
    rows = struct.unpack('i', bf.read(4))[0]
    cols = struct.unpack('i', bf.read(4))[0]
    channels = struct.unpack('i', bf.read(4))[0]

    num_values = rows * cols # expect only one channel in segmentation output
    out = np.zeros(num_values, dtype=np.uint8) # expect only values between 0 and 255

    for i in range(num_values):
      out[i] = np.uint8(struct.unpack('h', bf.read(2))[0])

    return np.rot90(np.fliplr(out.reshape((cols, rows))))
Project: PSPNet-Keras-tensorflow | Author: Vladkryvoruchko
def rot90(W):
    for i in range(W.shape[0]):
        for j in range(W.shape[1]):
            W[i, j] = np.rot90(W[i, j], 2)
    return W
Project: kaggle-dstl-satellite-imagery-feature-detection | Author: u1234x1234
def rot90_mat(mat, k):
    n_mat = np.zeros(mat.shape, dtype=np.float32)
    for i in range(mat.shape[2]):
        n_mat[:, :, i] = np.rot90(mat[:, :, i], k)
    return n_mat
Project: KagglePlanetPytorch | Author: Mctigger
def rotate_90(k=1):
    def call(x):
        x = np.rot90(x, k).copy()
        return x

    return call
Project: quoll | Author: LanguageMachines
def visualize_document_topics_heatmap(self, outfile, set_topics=False):
        self.sort_doctopics_groups()
        doctopics_raw_hm = numpy.rot90(self.document_topics_raw)
        rows, columns = doctopics_raw_hm.shape
        rownames = self.topic_labels
        columnnames = self.document_names
        pyplot.pcolor(doctopics_raw_hm, norm=None, cmap='Blues')
        pyplot.gca().invert_yaxis()
        if self.group_names:
            ticks_groups = []
            bounds = []
            current_group = False
            start = 0
            for i,doc in enumerate(self.document_names):
                group = self.document_group_dict[doc]
                if group != current_group:
                    if i != 0:
                        bounds.append(i-1)
                        ticks_groups[start+int((i-start)/2)] = current_group
                    current_group = group
                    start=i
                ticks_groups.append('')
            ticks_groups[start+int((i-start)/2)] = current_group
            pyplot.xticks(numpy.arange(columns)+0.5,ticks_groups, fontsize=11)
            if set_topics:
                for index in set_topics:
                    pyplot.axhline(y=index)
                topic_names = self.return_topic_names(set_topics)
                pyplot.yticks(set_topics,topic_names,fontsize=8)
            else:
                pyplot.yticks(numpy.arange(rows)+0.5, rownames, fontsize=8)
            for bound in bounds:
                pyplot.axvline(x=bound)
        pyplot.colorbar(cmap='Blues')
        pyplot.savefig(outfile)
        pyplot.clf()
Project: radar | Author: amoose136
def _rotate(self, im, meta):
            """ Use Orientation information from EXIF meta data to 
            orient the image correctly. Freeimage is also supposed to
            support that, and I am pretty sure it once did, but now it
            does not, so let's just do it in Python.
            Edit: and now it works again, just leave in place as a fallback.
            """
            if self.request.kwargs.get('exifrotate', None) == 2:
                try:
                    ori = meta['EXIF_MAIN']['Orientation']
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --
Project: inferno | Author: inferno-pytorch
def image_function(self, image):
        return np.rot90(image, k=self.get_random_variable('k'))
Project: image_analysis | Author: CoDaS-Lab
def noise_amp(self, size):
        """
        DESCRIPTION:
            Creates a size x size matrix of amplitude values with a 1/f slope,
            used to give randomly generated noise a 1/f spectrum

        ARGS:
            :size: size of matrix

        RETURNS:
            :returns the amplitudes with noise added
        """

        slope = 1
        x = y = np.linspace(1, size, size)
        xgrid, ygrid = np.meshgrid(x, y)  # coordinates for a square grid
        xgrid = np.subtract(xgrid, size // 2)
        ygrid = np.subtract(ygrid, size // 2)

        amp = self.fft.fftshift(np.divide(np.sqrt(np.square(xgrid) +
                                          np.square(ygrid)),
                                          size * np.sqrt(2)))
        amp = np.rot90(amp, 2)
        amp[0, 0] = 1
        amp = 1 / amp**slope
        amp[0, 0] = 0
        return amp