Python numpy module: fliplr() example source code

We extracted the following 46 code examples from open-source Python projects to illustrate how numpy.fliplr() is used.
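
Before the project snippets, here is a minimal illustrative sketch (not taken from any of the projects below) of the basic behavior: numpy.fliplr reverses the order of the columns of an array with at least two dimensions, which flips an image horizontally, and is equivalent to the slice a[:, ::-1].

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])

print(np.fliplr(a))
# [[3 2 1]
#  [6 5 4]]

print(np.array_equal(np.fliplr(a), a[:, ::-1]))  # True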

Project: TAC-GAN    Author: dashayushman    | project source | file source
def load_image_array_flowers(image_file, image_size):
    img = skimage.io.imread(image_file)
    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray( (img.shape[0], img.shape[1], 3), dtype = 'uint8')
        img_new[:,:,0] = img
        img_new[:,:,1] = img
        img_new[:,:,2] = img
        img = img_new

    img_resized = skimage.transform.resize(img, (image_size, image_size))

    # FLIP HORIZONTALLY WITH PROBABILITY 0.5
    if random.random() > 0.5:
        img_resized = np.fliplr(img_resized)


    return img_resized.astype('float32')
Project: how_to_convert_text_to_images    Author: llSourcell    | project source | file source
def transform(self, images):
        if self._aug_flag:
            transformed_images =\
                np.zeros([images.shape[0], self._imsize, self._imsize, 3])
            ori_size = images.shape[1]
            for i in range(images.shape[0]):
                # cast offsets to int so they can be used as slice indices
                h1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
                w1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
                cropped_image =\
                    images[i][w1: w1 + self._imsize, h1: h1 + self._imsize, :]
                if random.random() > 0.5:
                    transformed_images[i] = np.fliplr(cropped_image)
                else:
                    transformed_images[i] = cropped_image
            return transformed_images
        else:
            return images
Project: pyVSR    Author: georgesterpu    | project source | file source
def zz(matrix, nb):
    r"""Zig-zag traversal of the input matrix
    :param matrix: input matrix
    :param nb: number of coefficients to keep
    :return: an array of nb coefficients
    """
    flipped = np.fliplr(matrix)
    rows, cols = flipped.shape  # numbers of rows and columns

    coefficient_list = []

    for loop, i in enumerate(range(cols - 1, -rows, -1)):
        anti_diagonal = np.diagonal(flipped, i)

        # reversing even diagonals prioritizes the X resolution
        # reversing odd diagonals prioritizes the Y resolution
        # for square matrices, the information content is the same only when nb covers half of the matrix
        #  e.g. [ nb = n*(n+1)/2 ]
        if loop % 2 == 0:
            anti_diagonal = anti_diagonal[::-1]  # reverse anti_diagonal

        coefficient_list.extend([x for x in anti_diagonal])

    # flattened = [val for sublist in coefficient_list for val in sublist]
    return coefficient_list[:nb]
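
A quick check of the traversal order on a 3x3 matrix (an illustrative usage sketch, assuming numpy is imported as np as in the snippet above): the coefficients come out in the familiar JPEG-style zig-zag order, truncated to nb entries.

m = np.arange(9).reshape(3, 3)
# [[0 1 2]
#  [3 4 5]
#  [6 7 8]]
print([int(c) for c in zz(m, 6)])  # [0, 1, 3, 6, 4, 2]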
Project: StackGAN    Author: hanzhanggit    | project source | file source
def transform(self, images):
        if self._aug_flag:
            transformed_images =\
                np.zeros([images.shape[0], self._imsize, self._imsize, 3])
            ori_size = images.shape[1]
            for i in range(images.shape[0]):
                # cast offsets to int so they can be used as slice indices
                h1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
                w1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
                cropped_image =\
                    images[i][w1: w1 + self._imsize, h1: h1 + self._imsize, :]
                if random.random() > 0.5:
                    transformed_images[i] = np.fliplr(cropped_image)
                else:
                    transformed_images[i] = cropped_image
            return transformed_images
        else:
            return images
Project: inky-phat    Author: pimoroni    | project source | file source
def update(self):
        self._display_init()

        x1, x2 = self.update_x1, self.update_x2
        y1, y2 = self.update_y1, self.update_y2

        region = self.buffer[y1:y2, x1:x2]

        if self.v_flip:
            region = numpy.fliplr(region)

        if self.h_flip:
            region = numpy.flipud(region)

        buf_red = numpy.packbits(numpy.where(region == RED, 1, 0)).tolist()
        if self.inky_version == 1:
            buf_black = numpy.packbits(numpy.where(region == 0, 0, 1)).tolist()
        else:
            buf_black = numpy.packbits(numpy.where(region == BLACK, 0, 1)).tolist()

        self._display_update(buf_black, buf_red)
        self._display_fini()
Project: KittiClass    Author: MarvinTeichmann    | project source | file source
def _make_data_gen(hypes, phase, data_dir):
    """Return a data generator that outputs image samples."""
    if phase == 'train':
        data_file = hypes['data']["train_file"]
    elif phase == 'val':
        data_file = hypes['data']["val_file"]
    else:
        assert False, "Unknown Phase %s" % phase

    data_file = os.path.join(data_dir, data_file)

    data = _load_gt_file(hypes, data_file)

    for image, label in data:

        if phase == 'val':
            assert(False)
        elif phase == 'train':

            yield resize_input(hypes, image, label)

            yield resize_input(hypes, np.fliplr(image), label)
Project: PySAT    Author: USGS-Astrogeology    | project source | file source
def low_rank_align(X, Y, Cxy, d=None, mu=0.8):
    """Input: data matrices X,Y,  correspondence matrix Cxy,
              embedding dimension d, and correspondence weight mu
       Output: embedded X and embedded Y
    """
    nx, dx = X.shape
    ny, dy = Y.shape
    assert Cxy.shape==(nx,ny), \
        'Correspondence matrix must be shape num_X_samples X num_Y_samples.'
    C = np.fliplr(block_diag(np.fliplr(Cxy),np.fliplr(Cxy.T)))
    if d is None:
        d = min(dx,dy)
    Rx = low_rank_repr(X,d)
    Ry = low_rank_repr(Y,d)
    R = block_diag(Rx,Ry)
    tmp = np.eye(R.shape[0]) - R
    M = tmp.T.dot(tmp)
    L = laplacian(C)
    eigen_prob = (1-mu)*M + 2*mu*L
    _,F = eigh(eigen_prob,eigvals=(1,d),overwrite_a=True)
    Xembed = F[:nx]
    Yembed = F[nx:]
    return Xembed, Yembed
Project: signal_subspace    Author: scivision    | project source | file source
def corrmtx(x,m):
    """
    from https://github.com/cokelaer/spectrum/
    like matlab corrmtx(x,'mod'), with a different normalization factor.
    """
    x = np.asarray(x, dtype=float)
    assert x.ndim == 1, '1-D only'

    N = x.size

    Tp = toeplitz(x[m:N], x[m::-1])

    C = np.zeros((2*(N-m), m+1), dtype=x.dtype)

    for i in range(0, N-m):
        C[i] = Tp[i]

    Tp = np.fliplr(Tp.conj())
    for i in range(N-m, 2*(N-m)):
        C[i] = Tp[i-N+m]

    return C
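
An illustrative shape check (hypothetical input; it assumes numpy as np and scipy.linalg's toeplitz are imported as in the original module): for a length-10 signal and m = 3, the returned matrix has shape (2*(N-m), m+1) = (14, 4).

x = np.arange(10.0)
C = corrmtx(x, 3)
print(C.shape)   # (14, 4)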
Project: qiskit-sdk-py    Author: QISKit    | project source | file source
def concurrence(state):
    """Calculate the concurrence.

    Args:
        state (np.array): a quantum state
    Returns:
        concurrence.
    """
    rho = np.array(state)
    if rho.ndim == 1:
        rho = outer(state)
    if len(state) != 4:
        raise Exception("Concurence is not defined for more than two qubits")

    YY = np.fliplr(np.diag([-1, 1, 1, -1]))
    A = rho.dot(YY).dot(rho.conj()).dot(YY)
    w = la.eigh(A, eigvals_only=True)
    w = np.sqrt(np.maximum(w, 0))
    return max(0.0, w[-1]-np.sum(w[0:-1]))
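
A sanity-check usage sketch (illustrative only; it assumes numpy as np and scipy.linalg as la are available, as in the original module): a maximally entangled Bell state gives concurrence 1, while a product state gives 0.

bell = np.array([1, 0, 0, 1]) / np.sqrt(2)
print(concurrence(np.outer(bell, bell.conj())))        # ~1.0 (maximally entangled)

product = np.array([1, 0, 0, 0])
print(concurrence(np.outer(product, product.conj())))  # 0.0 (separable)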


###############################################################
# Other.
###############################################################
Project: human-pose-estimation-by-deep-learning    Author: HYPJUDY    | project source | file source
def _random_flip_lr(self, images, labels):
        if(images.shape[0] != labels.shape[0]):
            raise Exception("Batch size Error.")
        rand_u = np.random.uniform(0.0, 1.0, images.shape[0])
        rand_cond = rand_u > 0.5

        o_images = np.zeros_like(images)
        o_labels = np.zeros_like(labels)

        for idx in xrange(images.shape[0]):
            condition = rand_cond[idx]
            if condition:
                # "flip"
                o_images[idx] = np.fliplr(images[idx])
                o_labels[idx, ::2] = self.float_max - labels[idx, ::2]
                o_labels[idx, 1::2] = labels[idx, 1::2]
            else:
                # "origin"
                o_images[idx] = images[idx]
                o_labels[idx] = labels[idx]

        return o_images, o_labels
Project: human-pose-estimation-by-deep-learning    Author: HYPJUDY    | project source | file source
def _batch_random_flip_lr(self, images, labels):
        if(images.shape[0] != labels.shape[0]):
            raise Exception("Batch size Error.")
        rand_u = np.random.uniform(0.0, 1.0)
        rand_cond = rand_u > 0.5

        o_images = np.zeros_like(images)
        o_labels = np.zeros_like(labels)

        for idx in xrange(images.shape[0]):
            condition = rand_cond
            if condition:
                # "flip"
                o_images[idx] = np.fliplr(images[idx])
                o_labels[idx, ::2] = self.float_max - labels[idx, ::2]
                o_labels[idx, 1::2] = labels[idx, 1::2]
            else:
                # "origin"
                o_images[idx] = images[idx]
                o_labels[idx] = labels[idx]

        return o_images, o_labels
Project: BAG_framework    Author: ucb-art    | project source | file source
def __init__(self, hmat, m, n, tper, k, out0):
        hmat = np.asarray(hmat)
        if hmat.shape != (2 * m + 1, n + 1):
            raise ValueError('hmat shape = %s not compatible with M=%d, N=%d' %
                             (hmat.shape, m, n))

        # use symmetry to fill in negative input frequency data.
        fullh = np.empty((2 * m + 1, 2 * n + 1), dtype=complex)
        fullh[:, n:] = hmat / (k * tper)
        fullh[:, :n] = np.fliplr(np.flipud(fullh[:, n + 1:])).conj()

        self.hmat = fullh
        wc = 2.0 * np.pi / tper
        self.m_col = np.arange(-m, m + 1) * (1.0j * wc)
        self.n_col = np.arange(-n, n + 1) * (1.0j * wc / k)
        self.m_col = self.m_col.reshape((-1, 1))
        self.n_col = self.n_col.reshape((-1, 1))
        self.tper = tper
        self.k = k
        self.outfun = interp.interp1d(out0[:, 0], out0[:, 1], bounds_error=True,
                                      assume_sorted=True)
Project: progressive_a3c    Author: seann999    | project source | file source
def rgb2gray(self, rgb, i=0):
        if FLAGS.save_frames:
            if self.thread_index == 0 and len(os.listdir(os.path.join(FLAGS.model_dir, "images"))) < 1000:
                scipy.misc.imsave("%s/%i.png" % (os.path.join(FLAGS.model_dir, "images"), i), rgb["image"][0])

        img = np.asarray(rgb["image"][0])[..., :3]
        img = np.dot(img, [0.299, 0.587, 0.114])
        img = scipy.misc.imresize(img, (84, 84)) / 255.0
        #flip H
        #
        #img = np.fliplr(img)



        return img
        #return -np.dot(img, [0.299, 0.587, 0.114]) / 255.0 + 1.0
Project: hintbot    Author: madebyollin    | project source | file source
def sliceImages(inputImage, targetImage):
    inputSlices = []
    targetSlices = []
    sliceSize = 32
    for y in range(0,inputImage.shape[1]//sliceSize):
        for x in range(0,inputImage.shape[0]//sliceSize):
            inputSlice = inputImage[x*sliceSize:(x+1)*sliceSize,y*sliceSize:(y+1)*sliceSize]
            targetSlice = targetImage[x*sliceSize//2:(x+1)*sliceSize//2,y*sliceSize//2:(y+1)*sliceSize//2]
            # only add slices if they're not just empty space
            # if (np.any(targetSlice)):
                # Reweight smaller sizes
                # for i in range(0,max(1,128//inputImage.shape[1])**2):
            inputSlices.append(inputSlice)
            targetSlices.append(targetSlice)
                # inputSlices.append(np.fliplr(inputSlice))
                # targetSlices.append(np.fliplr(targetSlice))
                # inputSlices.append(np.flipud(inputSlice))
                # targetSlices.append(np.flipud(targetSlice))

                    # naiveSlice = imresize(inputSlice, 0.5)
                    # deltaSlice = targetSlice - naiveSlice
                    # targetSlices.append(deltaSlice)
    # return two arrays of images in a tuple
    return (inputSlices, targetSlices)
Project: DeepNet    Author: hok205    | project source | file source
def transform(patch, flip=False, mirror=False, rotations=[]):
    """Perform data augmentation on a patch.

    Args:
        patch (numpy array): The patch to be processed.
        flip (bool, optional): Up/down symmetry.
        mirror (bool, optional): Left/right symmetry.
        rotations (int list, optional) : rotations to perform (angles in deg).

    Returns:
        array list: list of augmented patches
    """
    transformed_patches = [patch]
    for angle in rotations:
        transformed_patches.append(skimage.img_as_ubyte(skimage.transform.rotate(patch, angle)))
    if flip:
        transformed_patches.append(np.flipud(patch))
    if mirror:
        transformed_patches.append(np.fliplr(patch))
    return transformed_patches
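
An illustrative call with hypothetical values, showing how the augmented list grows: the original patch, one rotated copy per requested angle, plus the flipped and mirrored versions.

patch = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
augmented = transform(patch, flip=True, mirror=True, rotations=[90, 180])
print(len(augmented))  # 5: original, two rotations, np.flipud, np.fliplr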


# In[4]:
Project: MLAB_Intuit    Author: rykard95    | project source | file source
def results(db, model, comp_func, k=1, expand_label=False):
    if k < 1:
        raise ValueError
    score_matrix = generate_cross_scores(local_db, model, comp_func, expand_label)
    labels = np.array(get_labels())

    # Sort scores in descending order
    top_score_ind = np.argsort(score_matrix[:, :score_matrix.shape[1]-2,], axis=1)
    top_score_ind = np.fliplr(top_score_ind)

    # Get top K guesses
    y_hat = []
    for i in range(k):
        y_hat.append(labels[top_score_ind[:,i]])
    y_hat = np.vstack(y_hat).T

    y = score_matrix[:, score_matrix.shape[1]-1]
    score_pool = []
    for i in range(k):
        score_pool.append((y == y_hat[:, i]).astype(int))
    score_pool = np.vstack(score_pool).T

    r = np.max(score_pool, axis=1)
    return float(np.count_nonzero(r)) / float(len(r)), score_matrix
Project: behavioral-cloning    Author: BillZito    | project source | file source
def show_file_images(filename, img_list):
  fig = plt.figure()

  #for 9 random images, plot the original and its horizontal flip
  for img_num in range(0, 9):
    random_num = random.randint(0, len(img_list) - 1)
    img_name = img_list[random_num]
    print('image name is ', img_name)
    img = misc.imread(filename + img_name)
    np_img = np.array(img)
    flipped_img = np.fliplr(np_img)[60:160]

    # print('img is ', img)
    img = img[60:160]
    fig.add_subplot(5, 5, img_num * 2 + 1)
    plt.imshow(img)
    fig.add_subplot(5, 5, img_num * 2 + 2)
    plt.imshow(flipped_img)

  plt.show()
Project: backtrackbb    Author: BackTrackBB    | project source | file source
def Gaussian2D(image, sigma, padding=0):
    n, m = image.shape[0], image.shape[1]
    tmp = np.zeros((n + padding, m + padding))
    if tmp.shape[0] < 4:
        raise ValueError('Image and padding too small')
    if tmp.shape[1] < 4:
        raise ValueError('Image and padding too small')
    B, A = __gausscoeff(sigma)
    tmp[:n, :m] = image
    tmp = lfilter(B, A, tmp, axis=0)
    tmp = np.flipud(tmp)
    tmp = lfilter(B, A, tmp, axis=0)
    tmp = np.flipud(tmp)
    tmp = lfilter(B, A, tmp, axis=1)
    tmp = np.fliplr(tmp)
    tmp = lfilter(B, A, tmp, axis=1)
    tmp = np.fliplr(tmp)
    return tmp[:n, :m]
#-----------------------------------------------------------------------------
Project: ai-ama-exercises    Author: JKCooper2    | project source | file source
def num_attackers(node):
    board = node.state

    t_board = np.transpose(board)
    f_board = np.fliplr(board)

    total_attackers = 0

    q = np.where(board == 1)

    for i in range(len(q[0])):
        a_x = q[0][i]
        a_y = q[1][i]

        point = board[a_x][a_y]
        a_row = sum(board[a_x]) - point
        a_col = sum(t_board[a_y]) - point
        a_diag1 = sum(board.diagonal(a_y - a_x)) - point
        a_diag2 = sum(f_board.diagonal(len(board) - a_x - a_y - 1)) - point

        total_attackers += a_row + a_col + a_diag1 + a_diag2

    return total_attackers
Project: zorro    Author: C-CINA    | project source | file source
def loadBoxFile(self, box_name ):
        box_data = np.loadtxt( box_name, comments="_" )
        # box_data columns = [x_center, y_center, ..., ..., FigureOfMerit]
        self.boxLen = box_data[0,2]

        # In boxfiles coordinates are at the edges.
        self.boxYX = np.fliplr( box_data[:,:2] )
        # DEBUG: The flipping of the y-coordinate system is annoying...
        print( "boxYX.shape = " + str(self.boxYX.shape) + ", len = " + str(self.boxLen) )
        self.boxYX[:,0] = self.im.shape[0] - self.boxYX[:,0]
        self.boxYX[:,1] += int( self.boxLen / 2 )
        self.boxYX[:,0] -= int( self.boxLen/2)
        try:
            self.boxFoM = box_data[:,4]

            clim = zorro.zorro_util.ciClim( self.boxFoM, sigma=2.5 )
            self.boxFoM = zorro.zorro_util.normalize( np.clip( self.boxFoM, clim[0], clim[1] ) )

        except:
            self.boxFoM = np.ones( self.boxYX.shape[0] )
        self.boxColors = plt.cm.gnuplot( self.boxFoM )
Project: zorro    Author: C-CINA    | project source | file source
def loadStarFile(self, box_name ):
        box_data = np.loadtxt( box_name, comments="_", skiprows=5 )
        # box_data columns = [x_center, y_center, ..., ..., FigureOfMerit]
        # In star files coordinates are centered
        self.boxYX = np.fliplr( box_data[:,:2] )
        # DEBUG: The flipping of the y-coordinate system is annoying...
        self.boxYX[:,0] = self.im.shape[0] - self.boxYX[:,0]

        # There's no box size information in a star file so we have to use a guess
        self.boxLen = 224
        #self.boxYX[:,1] -= int( self.boxLen / 2 )
        #self.boxYX[:,0] += int( self.boxLen / 2 )
        try:
            self.boxFoM = box_data[:,4]
            clim = zorro.zorro_util.ciClim( self.boxFoM, sigma=2.5 )
            self.boxFoM = zorro.zorro_util.normalize( np.clip( self.boxFoM, clim[0], clim[1] ) )

        except:
            self.boxFoM = np.ones( self.boxYX.shape[0] )

        self.boxColors = plt.cm.gnuplot( self.boxFoM )
Project: pix2pix-human    Author: Engineering-Course    | project source | file source
def preprocess_A_and_B(img_A, img_B, load_size=286, fine_size=256, flip=True, is_test=False):
    if is_test:
        img_A = scipy.misc.imresize(img_A, [fine_size, fine_size])
        img_B = scipy.misc.imresize(img_B, [fine_size, fine_size])
    else:
        img_A = scipy.misc.imresize(img_A, [load_size, load_size])
        img_B = scipy.misc.imresize(img_B, [load_size, load_size])

        h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
        w1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
        img_A = img_A[h1:h1+fine_size, w1:w1+fine_size]
        img_B = img_B[h1:h1+fine_size, w1:w1+fine_size]

        if flip and np.random.random() > 0.5:
            img_A = np.fliplr(img_A)
            img_B = np.fliplr(img_B)

    return img_A, img_B

# -----------------------------

# new added function for lip dataset, saving pose
Project: ml-traffic    Author: Zepheus    | project source | file source
def process(self, im):
        # if the side is right, flip the image so the same left-side transform can be applied
        if self.side != 'left':
            im = np.fliplr(im)

        # slope of the perspective
        slope = tan(radians(self.degrees))
        (h, w, _) = im.shape

        matrix_trans = np.array([[1, 0, 0],
                                [-slope/2, 1, slope * h / 2],
                                [-slope/w, 0, 1 + slope]])

        trans = ProjectiveTransform(matrix_trans)
        img_trans = warp(im, trans)
        if self.side != 'left':
            img_trans = np.fliplr(img_trans)
        return img_trans
Project: tf-Faster-RCNN    Author: kevinjliang    | project source | file source
def _applyImageFlips(image, flips):
    '''
    Apply left-right and up-down flips to an image

    Args:
        image (numpy array 2D/3D): image to be flipped
        flips (tuple):
            [0]: Boolean to flip horizontally
            [1]: Boolean to flip vertically

    Returns:
        Flipped image
    '''
    image = np.fliplr(image) if flips[0] else image
    image = np.flipud(image) if flips[1] else image

    return image
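
A hypothetical call for illustration (assumes numpy as np): flip only horizontally by passing (True, False).

img = np.random.rand(32, 32, 3)
flipped = _applyImageFlips(img, (True, False))   # horizontal flip only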
Project: chainer-pix2pix    Author: wuhuikai    | project source | file source
def _read_image_as_array(path, dtype, load_size, crop_size, flip):
    f = Image.open(path)

    A, B = numpy.array_split(numpy.asarray(f), 2, axis=1)
    if hasattr(f, 'close'):
        f.close()

    A = _resize(A, load_size, Image.BILINEAR, dtype)
    B = _resize(B, load_size, Image.NEAREST, dtype)

    sx, sy = numpy.random.randint(0, load_size-crop_size, 2)
    A = _crop(A, sx, sy, crop_size)
    B = _crop(B, sx, sy, crop_size)

    if flip and numpy.random.rand() > 0.5:
        A = numpy.fliplr(A)
        B = numpy.fliplr(B)

    return A.transpose(2, 0, 1), B.transpose(2, 0, 1)
Project: pyhiro    Author: wanweiwei07    | project source | file source
def _create_mesh(self):
        log.debug('Creating mesh for box primitive')
        box = self._unit_box
        vertices, faces, normals = box.vertices, box.faces, box.face_normals
        vertices = points.transform_points(vertices * self.box_extents, 
                                           self.box_transform)
        normals = np.dot(self.box_transform[0:3,0:3], 
                         normals.T).T
        aligned = windings_aligned(vertices[faces[:1]], normals[:1])[0]
        if not aligned:
            faces = np.fliplr(faces)        
        # for a primitive the vertices and faces are derived from other information
        # so it goes in the cache, instead of the datastore
        self._cache['vertices'] = vertices
        self._cache['faces']    = faces
        self._cache['face_normals'] = normals
Project: Triplet_Loss_SBIR    Author: TuBui    | project source | file source
def retrieve2file(self, out_file, numn=0, nump=0):
    """highly customised for hardsel"""
    ids = self.retrieve()
    ret_labels = self.label_src[ids]
    rel = ret_labels == self.label_q
    #include/exclude the relevant in hard pos/neg selection
    pos = ids[rel].reshape([rel.shape[0],-1])
    pos = np.fliplr(pos)                       #hard positive
    neg = ids[~rel].reshape([rel.shape[0],-1]) #hard negative

    if nump > 0 and nump < pos.shape[1]:
      pos = pos[:,0:nump]
    if numn > 0 and numn < neg.shape[1]:
      neg = neg[:,0:numn]
    if out_file.endswith('.npz'):
      np.savez(out_file, pos = pos, neg = neg)

    P = np.cumsum(rel,axis=1) / np.arange(1,rel.shape[1]+1,dtype=np.float32)[None,...]
    AP = np.sum(P*rel,axis=1) / (rel.sum(axis=1) + np.finfo(np.float32).eps)
    mAP = AP.mean()
    return mAP
Project: pix2pix-tensorflow    Author: yenchenlin    | project source | file source
def preprocess_A_and_B(img_A, img_B, load_size=286, fine_size=256, flip=True, is_test=False):
    if is_test:
        img_A = scipy.misc.imresize(img_A, [fine_size, fine_size])
        img_B = scipy.misc.imresize(img_B, [fine_size, fine_size])
    else:
        img_A = scipy.misc.imresize(img_A, [load_size, load_size])
        img_B = scipy.misc.imresize(img_B, [load_size, load_size])

        h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
        w1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
        img_A = img_A[h1:h1+fine_size, w1:w1+fine_size]
        img_B = img_B[h1:h1+fine_size, w1:w1+fine_size]

        if flip and np.random.random() > 0.5:
            img_A = np.fliplr(img_A)
            img_B = np.fliplr(img_B)

    return img_A, img_B

# -----------------------------
Project: tensorflow-cyclegan    Author: rickbarraza    | project source | file source
def preprocess_A_and_B(img_A, img_B, load_size=286, fine_size=256, flip=True, is_test=False):
    if is_test:
        img_A = scipy.misc.imresize(img_A, [fine_size, fine_size])
        img_B = scipy.misc.imresize(img_B, [fine_size, fine_size])
    else:
        img_A = scipy.misc.imresize(img_A, [load_size, load_size])
        img_B = scipy.misc.imresize(img_B, [load_size, load_size])

        h1 = int(np.ceil(np.random.uniform(1e-2, load_size - fine_size)))
        w1 = int(np.ceil(np.random.uniform(1e-2, load_size - fine_size)))
        img_A = img_A[h1:h1 + fine_size, w1:w1 + fine_size]
        img_B = img_B[h1:h1 + fine_size, w1:w1 + fine_size]

        if flip and np.random.random() > 0.5:
            img_A = np.fliplr(img_A)
            img_B = np.fliplr(img_B)

    return img_A, img_B


# DEFINE OUR SAMPLING FUNCTIONS
# -------------------------------------------------------
Project: crowddynamics    Author: jaantollander    | project source | file source
def update(self):
        agents = self.simulation.agents.array
        field = self.simulation.field

        for target in range(len(field.targets)):
            has_target = agents['target'] == target
            if not has_target.size:
                continue

            mgrid, distance_map, direction_map = field.navigation_to_target(
                target, self.step, self.radius, self.strength)

            # Flip x and y to array index i and j
            indices = np.fliplr(mgrid.indicer(agents[has_target]['position']))
            new_direction = getdefault(
                indices, direction_map, agents[has_target]['target_direction'])
            agents['target_direction'][has_target] = new_direction
Project: crowddynamics    Author: jaantollander    | project source | file source
def update(self):
        agents = self.simulation.agents.array
        field = self.simulation.field

        # FIXME: virtual obstacles add too much computational overhead
        # obstacles = geom_to_linear_obstacles(
        #     field.obstacles.buffer(0.3, resolution=3))
        obstacles = geom_to_linear_obstacles(field.obstacles)
        direction_herding = leader_follower_with_herding_interaction(
            agents, obstacles, self.sight_follower, self.size_nearest_other)
        is_follower = agents['is_follower']
        agents['target_direction'][is_follower] = direction_herding[is_follower]

        # Set target direction for herding agents that do not have a target
        # if field.obstacles is None:
        #     agents['target_direction'][is_follower] = direction_herding[is_follower]
        # else:
        #     # Obstacle avoidance
        #     mgrid = field.meshgrid(self.step)
        #     dir_map_obs, dmap_obs = field.direction_map_obstacles(self.step)
        #     indices = np.fliplr(mgrid.indicer(agents['position'][is_follower]))
        #     direction = obstacle_handling_continuous(
        #         dmap_obs, dir_map_obs, direction_herding[is_follower], indices,
        #         self.radius, self.strength)
        #     agents['target_direction'][is_follower] = direction
Project: camelyon-segmentation    Author: erenhalici    | project source | file source
def augment_image(self, image, i):
    if i == 0:
      return np.rot90(image)
    elif i == 1:
      return np.rot90(image,2)
    elif i == 2:
      return np.rot90(image,3)
    elif i == 3:
      return image
    elif i == 4:
      return np.fliplr(image)
    elif i == 5:
      return np.flipud(image)
    elif i == 6:
      return image.transpose(1,0,2)
    elif i == 7:
      return np.fliplr(np.rot90(image))
Project: camelyon-segmentation    Author: erenhalici    | project source | file source
def augment_image(self, image, i):
    if i == 0:
      return np.rot90(image)
    elif i == 1:
      return np.rot90(image,2)
    elif i == 2:
      return np.rot90(image,3)
    elif i == 3:
      return image
    elif i == 4:
      return np.fliplr(image)
    elif i == 5:
      return np.flipud(image)
    elif i == 6:
      return image.transpose(1,0,2)
    elif i == 7:
      return np.fliplr(np.rot90(image))
Project: magphase    Author: CSTR-Edinburgh    | project source | file source
def add_hermitian_half(m_data, data_type='mag'):

    if (data_type == 'mag') or (data_type == 'magnitude'):
        m_data = np.hstack((m_data , np.fliplr(m_data[:,1:-1])))

    elif data_type == 'phase':        
        m_data[:,0]  = 0            
        m_data[:,-1] = 0   
        m_data = np.hstack((m_data , -np.fliplr(m_data[:,1:-1])))

    elif data_type == 'zeros':
        nfrms, nFFThalf = m_data.shape
        m_data = np.hstack((m_data , np.zeros((nfrms,nFFThalf-2))))

    elif data_type == 'complex':
        m_data_real = add_hermitian_half(m_data.real)
        m_data_imag = add_hermitian_half(m_data.imag, data_type='phase')
        m_data      = m_data_real + m_data_imag * 1j

    return m_data
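
An illustrative shape check with hypothetical data (assumes numpy as np, as in the snippet): for an FFT length of 16, the one-sided spectrum has 9 bins per frame, and add_hermitian_half reconstructs the full 16-bin two-sided spectrum.

frames = np.random.randn(4, 16)
m_mag = np.abs(np.fft.rfft(frames))           # shape (4, 9): one-sided magnitude spectra
m_full = add_hermitian_half(m_mag, data_type='mag')
print(m_mag.shape, m_full.shape)              # (4, 9) (4, 16)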

# Remove hermitian half of fft-based data:-------------------------------------
# Works for either even or odd FFT lengths.
Project: magphase    Author: CSTR-Edinburgh    | project source | file source
def mcep_to_lin_sp_log(mgc_mat, nFFT):

    nFrms, n_coeffs = mgc_mat.shape
    nFFTHalf = 1 + nFFT/2

    mgc_mat = np.concatenate((mgc_mat, np.zeros((nFrms, (nFFT/2 - n_coeffs + 1)))),1)
    mgc_mat = np.concatenate((mgc_mat, np.fliplr(mgc_mat[:,1:-1])),1)
    sp_log  = (np.fft.fft(mgc_mat, nFFT,1)).real
    sp_log  = sp_log[:,0:nFFTHalf]

    return sp_log 


# Gets the RMS of a matrix regardless of how many bins m_data has;
# it works this out from the FFT length.
# For example, nFFT = 128, nBins_data = 60 (instead of 65 or 128)
Project: artorithmia    Author: alichtner    | project source | file source
def extract_symmetry(self):
        """
        Calculate the symmetry of the image by subtracting left from right.

        Input:  None
        Output: None
        """
        # currently this is only for horizontal symmetry
        if len(self.image.shape) == 3:
            height, width, _ = self.image.shape
        else:
            height, width = self.image.shape
        if width % 2 != 0:
            width -= 1
            pixels = height * width
            left = self.image[:, :width/2]
            right = self.image[:, width/2:-1]
        else:
            pixels = height * width
            left = self.image[:, :width/2]
            right = self.image[:, width/2:]
        left_gray = color.rgb2gray(left)
        right_gray = color.rgb2gray(right)
        self.symmetry = np.abs(left_gray -
                               np.fliplr(right_gray)).sum()/(pixels/1.*2)
Project: paints_tf    Author: latte0    | project source | file source
def preprocess_A_and_B(img_A, img_B, load_size=286, fine_size=256, flip=True, is_test=False):
    if is_test:
        img_A = scipy.misc.imresize(img_A, [fine_size, fine_size])
        img_B = scipy.misc.imresize(img_B, [fine_size, fine_size])
    else:
        img_A = scipy.misc.imresize(img_A, [load_size, load_size])
        img_B = scipy.misc.imresize(img_B, [load_size, load_size])

        h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
        w1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))
        img_A = img_A[h1:h1+fine_size, w1:w1+fine_size]
        img_B = img_B[h1:h1+fine_size, w1:w1+fine_size]

        if flip and np.random.random() > 0.5:
            img_A = np.fliplr(img_A)
            img_B = np.fliplr(img_B)

    return img_A, img_B

# -----------------------------
Project: facerecognition    Author: guoxiaolu    | project source | file source
def flip(image, random_flip):
    if random_flip and np.random.choice([True, False]):
        image = np.fliplr(image)
    return image
Project: code    Author: ActiveState    | project source | file source
def is_cross(self, candidate):
        '''
            first check the trace
            then flip and check the opposite trace

        '''
        return (candidate.trace()+np.fliplr(candidate).trace()) == (candidate.shape[0]*2)
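
An illustrative check of the same expression on hypothetical inputs (is_cross is a method, so the test below simply evaluates the trace condition directly, assuming numpy as np): a 0/1 matrix with ones on both diagonals passes, while the identity does not.

cross = np.array([[1, 0, 1],
                  [0, 1, 0],
                  [1, 0, 1]])
print((cross.trace() + np.fliplr(cross).trace()) == cross.shape[0] * 2)   # True
print((np.eye(3).trace() + np.fliplr(np.eye(3)).trace()) == 3 * 2)        # False: anti-diagonal sums to 1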
Project: TAC-GAN    Author: dashayushman    | project source | file source
def load_image_array(image_file, image_size,
                     image_id, data_dir='Data/datasets/mscoco/train2014',
                     mode='train'):
    img = None
    if os.path.exists(image_file):
        #print('found' + image_file)
        img = skimage.io.imread(image_file)
    else:
        print('notfound' + image_file)
        img = skimage.io.imread('http://mscoco.org/images/%d' % (image_id))
        img_path = os.path.join(data_dir, 'COCO_%s2014_%.12d.jpg' % ( mode,
                                                                      image_id))
        skimage.io.imsave(img_path, img)

    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray( (img.shape[0], img.shape[1], 3), dtype = 'uint8')
        img_new[:,:,0] = img
        img_new[:,:,1] = img
        img_new[:,:,2] = img
        img = img_new

    img_resized = skimage.transform.resize(img, (image_size, image_size))

    # FLIP HORIZONTALLY WITH PROBABILITY 0.5
    if random.random() > 0.5:
        img_resized = np.fliplr(img_resized)

    return img_resized.astype('float32')
Project: C3D-tensorflow    Author: hx173149    | project source | file source
def randomHorizontalFlip(rand_seed,img, u=0.5):
    if rand_seed < u:
        img = cv2.flip(img,1)  #np.fliplr(img)  #cv2.flip(img,1) ##left-right
    return img
Project: Wall-EEG    Author: neurotechuoft    | project source | file source
def plotmultichannel(data, params=None): 
# TODO Receive Labels as arguments
    """
    Creates a plot to present multichannel data

    Arguments
    data:  Multichannel Data [n_samples, n_channels]
    params: information about the data acquisition device being used
    """  
    plt.figure()        

    n_samples = data.shape[0]
    n_channels = data.shape[1]

    if params is not None:
        fs = params['sampling frequency']
        names = params['names of channels']
    else:
        fs = 1
        names = [""] * n_channels

    time_vec = np.arange(n_samples) / float(fs)

    data = np.fliplr(data)
    offset = 0
    for i_channel in range (0, n_channels):
        data_ac = data[:,i_channel] - np.mean(data[:,i_channel])
        offset = offset + 2 * np.max(np.abs(data_ac))        
        plt.plot(time_vec, data_ac + offset, label=names[i_channel])        

    plt.xlabel('Time [s]');
    plt.ylabel('Amplitude');
    plt.legend()        
    plt.draw()
Project: BirdProject    Author: ZlodeiBaal    | project source | file source
def AddNoize(i):
    R = random.randint(0, 1)
    if (R==1):
        i=np.fliplr(i)#random mirroring
    R = random.randint(0, 1)
    if (R==1):
        R = random.randint(-10, 10)
        i= ndimage.interpolation.rotate(i,R)#random rotation
    R = random.randint(0, 1)
    if (R==1):
        crop_left=random.randint(0,15)
        crop_right = random.randint(1, 15)
        crop_top = random.randint(0, 15)
        crop_bot = random.randint(1, 15)
        i=i[crop_left:-crop_right,crop_top:-crop_bot,:] # random crop
    # The next block is VERY SLOW because it uses Python to change the brightness;
    # it needs optimizing, but there is no time for that yet :)
    R = random.randint(0, 2)
    if (R==2): #Random brightness in R channel
        d = random.random()+1
        i[:, :, 0] = adjust_gamma(i[:,:,0],d)
    R = random.randint(0, 2)
    if (R==2): #Random brightness in G channel
        d = random.random()+1
        i[:, :, 1] = adjust_gamma(i[:, :, 1], d)
    R = random.randint(0, 2)
    if (R==2): #Random brightness in B channel
        d = random.random()+1
        i[:, :, 2] = adjust_gamma(i[:, :, 2], d)
    #misc.imsave("test.jpg",i)
    return i


#Prepare data for learning
Project: lps-anchor-pos-estimator    Author: bitcraze    | project source | file source
def polynomials2matrix(polynomial):
    p = eqsize(polynomial)
    nt = sum(nterms(p))
    nv = nvars(p[0])

    M = zeros((nv, nt), dtype=int32)
    inds = [None] * p.size
    k = 0
    for i in range(0, p.size):
        # Python ranges/slices are end-exclusive, unlike the MATLAB original
        inds[i] = range(k, k + nterms(p[i]))
        M[:, k:k + nterms(p[i])] = monomials(p[i])
        k = k + nterms(p[i])

    neg_M_sum = -1 * M.sum(axis=0)
    M_trans = M.conj().T
    neg_M_sum_trans = neg_M_sum.conj().T
    M_fliplr = fliplr(M_trans)

    # column-wise concatenation, mirroring the MATLAB-style [a b] syntax
    new_grev_M = concatenate((neg_M_sum_trans.reshape(-1, 1), M_fliplr), axis=1)

    # unique rows, with index and inverse-index maps (MATLAB unique(..., 'rows'))
    _, ia, ib = unique(new_grev_M, return_index=True, return_inverse=True, axis=0)

    M = M[:, ia].astype(float)

    mon = zeros((M.shape[1], 1), dtype=object)
    for i in range(M.shape[1] - 1, -1, -1):
        mon[i, 0] = Multipol.multipol(1, M[:, i])

    C = zeros((p.size, M.shape[1]))
    for i in range(0, p.size):
        ind = ib[inds[i]]
        C[i, ind] = coeffs(p[i])

    return C, mon
Project: Densenet-Tensorflow    Author: taki0112    | project source | file source
def _random_flip_leftright(batch):
    for i in range(len(batch)):
        if bool(random.getrandbits(1)):
            batch[i] = np.fliplr(batch[i])
    return batch
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko    | project source | file source
def predict(self, img, flip_evaluation):
        """
        Predict segementation for an image.

        Arguments:
            img: must be rowsxcolsx3
        """
        h_ori, w_ori = img.shape[:2]
        if img.shape[0:2] != self.input_shape:
            print("Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (img.shape[0:2], self.input_shape))
            img = misc.imresize(img, self.input_shape)
        input_data = self.preprocess_image(img)
        # utils.debug(self.model, input_data)

        regular_prediction = self.model.predict(input_data)[0]
        if flip_evaluation:
            print("Predict flipped")
            flipped_prediction = np.fliplr(self.model.predict(np.flip(input_data, axis=2))[0])
            prediction = (regular_prediction + flipped_prediction) / 2.0
        else:
            prediction = regular_prediction

        if img.shape[0:2] != self.input_shape:  # upscale prediction if necessary (compare both spatial dims)
            h, w = prediction.shape[:2]
            prediction = ndimage.zoom(prediction, (1.*h_ori/h, 1.*w_ori/w, 1.),
                                      order=1, prefilter=False)
        return prediction