Python Image module: fromarray() code examples

The following 47 code examples, extracted from open-source Python projects, illustrate how to use Image.fromarray().

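Before the project excerpts, here is a minimal, self-contained sketch of the basic call. It is illustrative only; the array contents and the output file name are assumptions, not taken from any of the projects below.

import numpy as np
from PIL import Image

# Build a small H x W x 3 uint8 array; Image.fromarray() reads it as an RGB image.
rgb = np.zeros((64, 64, 3), dtype=np.uint8)
rgb[:, :32] = [255, 0, 0]          # paint the left half red
img = Image.fromarray(rgb)
img.save("fromarray_demo.png")     # hypothetical output file name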
Project: STS-PiLot    Author: mark-orion    | project source | file source
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while camtest == False:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls.last_access > 10:
                break
Project: PiWifiCam    Author: mark-orion    | project source | file source
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while camtest == False:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls.last_access > 10:
                break
Project: HappyNet    Author: danduncan    | project source | file source
def saveTestImage(img,filename=None,outDir=None):
  # Get image filename from current timestamp
  if filename is None:
    ts = time.time()
    formatStr = "%Y-%m-%d_%H-%M-%S"
    filestr = datetime.datetime.fromtimestamp(ts).strftime(formatStr)
    filename = filestr + ".png"

  if outDir is not None:
    mkdirNoForce(outDir)
    filename = outDir + "/" + filename

  # Save image
  im = Image.fromarray(toggleRGB(img))
  im.save(filename)

  # Return filename
  return filename
Project: py-ffmpeg    Author: dandydarcy    | project source | file source
def NumPy2PIL(input):
    """Converts a numpy array to a PIL image.

    Supported input array layouts:
       2 dimensions of numpy.uint8
       3 dimensions of numpy.uint8
       2 dimensions of numpy.float32
    """
    if not isinstance(input, numpy.ndarray):
        raise TypeError, 'Must be called with numpy.ndarray!'
    # Check the number of dimensions of the input array
    ndim = input.ndim
    if not ndim in (2, 3):
        raise ValueError, 'Only 2D-arrays and 3D-arrays are supported!'
    if ndim == 2:
        channels = 1
    else:
        channels = input.shape[2]
    # supported modes list: [(channels, dtype), ...]
    modes_list = [(1, numpy.uint8), (3, numpy.uint8), (1, numpy.float32), (4,numpy.uint8)]
    mode = (channels, input.dtype)
    if not mode in modes_list:
        raise ValueError, 'Unknown or unsupported input mode'
    return Image.fromarray(input)
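For reference, a hedged usage sketch of the NumPy2PIL helper above, assuming the function and its numpy/Image imports are in scope; the gradient array is an assumption for illustration.

import numpy
from PIL import Image

# A 2-D uint8 gradient is one of the layouts the docstring lists; it maps to a
# single-channel ('L') PIL image. A 2-D float32 array would map to mode 'F' instead.
gradient = numpy.tile(numpy.arange(256, dtype=numpy.uint8), (64, 1))
pil_img = NumPy2PIL(gradient)      # hypothetical call to the function defined above
pil_img.save("gradient_demo.png")  # hypothetical output file name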
Project: kaggle_art    Author: small-yellow-duck    | project source | file source
def do_split():
    if os.path.isdir('train') and os.path.isdir('test'):
        return

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    os.mkdir('train')
    os.mkdir('test')

    np.savetxt('labels_train.csv', y_train, header='label')
    np.savetxt('labels_test.csv', y_test, header='label')

    for i in xrange(X_train.shape[0]):
        im = Image.fromarray(np.uint8(X_train[i]))
        im.save('train'+str(i)+'.png')

    for i in xrange(X_test.shape[0]):
        im = Image.fromarray(np.uint8(X_test[i]))
        im.save('test'+str(i)+'.png')   


#if __name__ == "__main__":
Project: kaggle_art    Author: small-yellow-duck    | project source | file source
def do_split():
    if os.path.isdir('train') and os.path.isdir('test'):
        return

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    os.mkdir('train')
    os.mkdir('test')

    np.savetxt('labels_train.csv', y_train, header='label')
    np.savetxt('labels_test.csv', y_test, header='label')

    for i in xrange(X_train.shape[0]):
        im = Image.fromarray(np.uint8(X_train[i]))
        im.save('train'+str(i)+'.png')

    for i in xrange(X_test.shape[0]):
        im = Image.fromarray(np.uint8(X_test[i]))
        im.save('test'+str(i)+'.png')
Project: kaggle_art    Author: small-yellow-duck    | project source | file source
def do_split():
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    os.mkdir('train')
    os.mkdir('test')

    np.savetxt('labels_train.csv', y_train, header='label')
    np.savetxt('labels_test.csv', y_test, header='label')

    for i in xrange(X_train.shape[0]):
        im = Image.fromarray(np.uint8(X_train[i]))
        im.save('train'+str(i)+'.png')

    for i in xrange(X_test.shape[0]):
        im = Image.fromarray(np.uint8(X_test[i]))
        im.save('test'+str(i)+'.png')
Project: VAE_tensorflow    Author: int8    | project source | file source
def images_to_hdf5(dir_path, output_hdf5, size = (112,112), channels = 3, resize_to = None):
    files = sorted(os.listdir(dir_path))
    nr_of_images = len(files)
    if resize_to:
        size = resize_to
    i = 0
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=nr_of_images).start()
    data = np.empty(shape=(nr_of_images, size[0], size[1], channels), dtype=np.uint8)
    for f in files:
        datum = imread(dir_path + '/' + f)
        if resize_to:
            datum = np.asarray(Image.fromarray((datum), 'RGB').resize((size[0],size[1]), PIL.Image.ANTIALIAS))
        data[i,:,:,:] = datum
        i = i + 1
        pbar.update(i)
    pbar.finish()
    with h5py.File(output_hdf5, 'w') as hf:
        hf.create_dataset('data', data=data)
Project: telegram-genetic-bot    Author: Dantistnfs    | project source | file source
def sequence_ocr_processing(image_url):
    image_downloaded = Image.open(BytesIO(requests.get(image_url).content))
    image_converted = image_downloaded.convert('L')
    numpy_picture = np.array(image_converted).astype(np.uint8)
    start = time.time()
    image_processed = Image.fromarray(numpy_picture)
    edge_dog = mahotas.dog(numpy_picture,sigma1=4,multiplier=1.5)
    first_dilation = mahotas.dilate(edge_dog, np.ones((15,30)))
    #second_dilation = mahotas.dilate(first_dilation, np.ones((15,30)))
    labeled, nr_objects = mahotas.label(first_dilation)
    bboxes = mahotas.labeled.bbox(labeled)
    draw = ImageDraw.Draw(image_processed)
    width, height = image_processed.size
    font = ImageFont.truetype("arial.ttf", int(height/15))
    for index in range(1,len(bboxes)):
        box_coordinates = bboxes[index]
        draw.rectangle([box_coordinates[2],box_coordinates[0],box_coordinates[3],box_coordinates[1]])
        draw.text([(box_coordinates[2]+5),box_coordinates[0]], str(index), font = font)
    end = time.time() - start
    return (nr_objects, end, image_processed, bboxes, image_downloaded)
Project: emotion-conv-net    Author: GautamShine    | project source | file source
def saveTestImage(img,filename=None,outDir=None):
  # Get image filename from current timestamp
  if filename is None:
    ts = time.time()
    formatStr = "%Y-%m-%d_%H-%M-%S"
    filestr = datetime.datetime.fromtimestamp(ts).strftime(formatStr)
    filename = filestr + ".png"

  if outDir is not None:
    mkdirNoForce(outDir)
    filename = outDir + "/" + filename

  # Save image
  im = Image.fromarray(toggleRGB(img))
  im.save(filename)

  # Return filename
  return filename
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad    | project source | file source
def match_images(input_dir, dirs, files):
    split_counter = 0
    for i in range(0, len(files)):
        print (i)
        img1 = plt.imread(os.path.join(input_dir, files[i]))
        img1 = (img1 * 255).round().astype(np.uint8)
        img1 = imresize(img1, (64, 64))
        for j in range(i+1 , len(files)):
            if (files[j][:4] == files[i][:4]):
                name = "match"+str(files[i][:10]) + "_"+ str(files[j][:10]) + ".png"
                img2 = plt.imread(os.path.join(input_dir, files[j]))
                img2 = (img2 * 255).round().astype(np.uint8)
                img2 = imresize(img2, (64, 64))
                img = np.vstack((img1, img2))
                img = Image.fromarray(img)
                if(split_counter < 8000):
                    split_counter+=1
                    img.save(os.path.join(dirs[1], name))
                else:
                    img.save(os.path.join(dirs[0], name))
Project: deepFace    Author: mlakhal    | project source | file source
def reshapeDataset(dataset, newWidth, newHeight):
  """Reshape a given datset to a new one with predefined size: (newWidth, newHeight).

  Args:
    dataset (numpy array): dataset array
    newWidth (int)       : width of the new image
    newHeight (int)      : height of the new image

  Returns:
    numpy array : reshaped dataset

  """
  new_dataset = []
  for data in dataset:
    size = (newWidth, newHeight)
    img = Image.fromarray(data)
    img = img.resize(size)
    img = np.array(img)
    new_dataset.append(img[np.newaxis, :, :])

  return np.array(new_dataset)
Project: STS-PiLot    Author: mark-orion    | project source | file source
def single_frame():
    sbuffer = StringIO.StringIO()
    camtest = False
    while camtest == False:
        camtest, rawimg = cfg.camera.read()
    if cfg.cv_hflip:
        rawimg = cv2.flip(rawimg, 1)
    if cfg.cv_vflip:
        rawimg = cv2.flip(rawimg, 0)
    imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(imgRGB)
    img.save(sbuffer, 'JPEG')
    return sbuffer.getvalue()
Project: PiWifiCam    Author: mark-orion    | project source | file source
def single_frame():
    sbuffer = StringIO.StringIO()
    camtest = False
    while camtest == False:
        camtest, rawimg = cfg.camera.read()
    if cfg.cv_hflip:
        rawimg = cv2.flip(rawimg, 1)
    if cfg.cv_vflip:
        rawimg = cv2.flip(rawimg, 0)
    imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(imgRGB)
    img.save(sbuffer, 'JPEG')
    return sbuffer.getvalue()
Project: office-interoperability-tools    Author: milossramek    | project source | file source
def saveRslt(overlayStyle, title, img0, img1, name0, name1, rslt, rsltText, outfile):
    exifcmd = 'exiftool -overwrite_original -Custom1="%s" %s >/dev/null'
    oname = "%s-%s.pdf"%(outfile, overlayStyle)
    if overlayStyle=='s':
        s=np.minimum(img0.shape, img1.shape)
        outimg=genside(img0, img1, s[0], s[1], name0, name1, rsltText.replace('*',' '), '')
    else:
        outimg = genoverlay(img0, title, name0, name1, rslt, img2=img1) 
    Image.fromarray(outimg).save(oname)
    os.system(exifcmd%(rsltText, oname))
Project: pycaffe-yolo    Author: Zehaos    | project source | file source
def draw_label(self, image, label):
        img_shape = np.shape(image)
        mask = label[:, :, 0]
        locations = np.where(mask > 0)
        img = Image.fromarray(image)
        drawobj = ImageDraw.Draw(img)
        #print mask
        for [i, j] in zip(locations[0], locations[1]):
            l = label[i][j][:]
            yolo_box = l[1:5]
            x = yolo_box[0]
            y = yolo_box[1]
            w = yolo_box[2]
            h = yolo_box[3]
            width = w*img_shape[1]
            height = h*img_shape[0]
            xmin = int(x*img_shape[1] - 0.5*width)
            ymin = int(y*img_shape[0] - 0.5*height)
            xmax = int(xmin+width)
            ymax = int(ymin+height)
            drawobj.rectangle([xmin, ymin, xmax, ymax], outline="blue")
            drawobj.point([0.5*(xmin+xmax), 0.5*(ymin+ymax)])
            for k in range(0, 7):
                drawobj.line([448/7.0*k, 0, 448/7.0*k, 448])
                drawobj.line([0, 448 / 7.0 * k, 448, 448 / 7.0 * k])
            #print label[i][j]
        img.show()
Project: retrieval-2016-deepvision    Author: imatge-upc    | project source | file source
def create_thumb(self,im):

        x = 800
        y = 800
        size = (y,x)
        image = Image.fromarray(im)

        image.thumbnail(size, Image.ANTIALIAS)
        background = Image.new('RGBA', size, "black")
        background.paste(image, ((size[0] - image.size[0]) / 2, (size[1] - image.size[1]) / 2))

        return np.array(background)[:,:,0:3]
Project: facemoji    Author: PiotrDabrowskey    | project source | file source
def nparray_as_image(nparray, mode='RGB'):
    """
    Converts numpy's array of image to PIL's Image.
    :param nparray: Numpy's array of image.
    :param mode: Mode of the conversion. Defaults to 'RGB'.
    :return: PIL's Image containing the image.
    """
    return Image.fromarray(np.asarray(np.clip(nparray, 0, 255), dtype='uint8'), mode)
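A hedged usage sketch of the nparray_as_image helper above, assuming the function and its numpy/Image imports are in scope; the random float array is an assumption for illustration.

import numpy as np
from PIL import Image

# Float values outside 0-255 are clipped before the uint8 cast, so out-of-range
# inputs still produce a valid 8-bit RGB image.
noisy = np.random.uniform(-50, 300, size=(32, 32, 3))
pil_img = nparray_as_image(noisy, mode='RGB')   # hypothetical call to the helper above
pil_img.save("clipped_demo.png")                # hypothetical output file name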
Project: mlstudy_week7    Author: ByungKeon-Ko    | project source | file source
def display_patch( img, annot ) :
    left    = int( annot[0] )
    btm     = int( annot[1] )
    width   = int( annot[2] )
    height  = int( annot[3] )
    # img.crop( (left, btm, left+width, btm+height) ).show()
    tmp_array = np.multiply(img[btm:btm+height, left:left+width], 255.).astype(np.uint8)
    tmp_img = Image.fromarray(tmp_array)
    tmp_img.show()
    return 1
Project: mlstudy_week7    Author: ByungKeon-Ko    | project source | file source
def save_tmp_img( tmp_matrix, new_index ) :
    tmp_img = Image.fromarray( tmp_matrix )
    tmp_img.save("%s/NS_96net/ns-%s.jpg" %(base_path, new_index) )

# -----------------------------------------
Project: mlstudy_week7    Author: ByungKeon-Ko    | project source | file source
def save_pyramid () :
    global temp_line
    global pyramids
    global patchNum
    global total_patch
    global total_pyramid

    org_img = Image.open("%s/../fddb/%s.jpg" %(base_path, temp_line), 'r' )

    org_img_name = "%s " %(temp_line)       # original image name

    pyramids = list( pyramid_gaussian(org_img, downscale=math.sqrt(2) ) )
    for i in range(len(pyramids) ):
        if min( pyramids[i].shape[0], pyramids[i].shape[1] ) < MinFace :
            del pyramids[i:]
            break

    for i in range( len (pyramids) ) :
        row = pyramids[i].shape[0]
        col = pyramids[i].shape[1]
        im_matrix = np.zeros([row, col, 3]).astype('uint8')

        for k in range(row):
            for j in range(col):
                im_matrix[k,j] = pyramids[i][k,j] * 255

        new_img = Image.fromarray(im_matrix)
        new_img.save("%s/pyramid-%s.jpg" %(ns_path, i+total_pyramid) )
        # new_img.show()

        patchNum[i] = (row-MinFace+1) * (col-MinFace+1)               # the number of patches
    total_pyramid = total_pyramid + len(pyramids)
    total_patch = total_patch + sum(patchNum)

# -----------------------------------------
Project: malmomo    Author: matpalm    | project source | file source
def rgb_to_png_bytes(rgb):
  img = Image.fromarray(rgb)
  sio = StringIO.StringIO()
  img.save(sio, format="png")
  return sio.getvalue()
Project: HappyNet    Author: danduncan    | project source | file source
def faceCrop(targetDir, imgList, color, single_face):
    # Load list of Haar cascades for faces
    faceCascades = load_cascades()

    # Iterate through images
    face_list = []
    for img in imgList:
        if os.path.isdir(img):
            continue
        pil_img = Image.open(img)
        if color:
            cv_img  = cv.cvtColor(np.array(pil_img), cv.COLOR_RGB2BGR)
        else:
            cv_img = np.array(pil_img)
            # Convert to grayscale if this image is actually color
            if cv_img.ndim == 3:
                cv_img = cv.cvtColor(np.array(pil_img), cv.COLOR_BGR2GRAY)

        # Detect all faces in this image
        scaled_img, faces = DetectFace(cv_img, color, faceCascades, single_face, second_pass=False, draw_rects=False)

        # Iterate through faces
        n=1
        for face in faces:
            cropped_cv_img = imgCrop(scaled_img, face, scale=1.0)
            if color:
                cropped_cv_img = rgb(cropped_cv_img)
            fname, ext = os.path.splitext(img)
            cropped_pil_img = Image.fromarray(cropped_cv_img)
            #save_name = loc + '/cropped/' + fname.split('/')[-1] + '_crop' + str(n) + ext
            save_name = targetDir + '/' + fname.split('/')[-1] + '_crop' + str(n) + ext
            cropped_pil_img.save(save_name)
            face_list.append(save_name)
            n += 1

    return face_list

# Add an emoji to an image at a specified point and size
# Inputs: img, emoji are ndarrays of WxHx3
#         faces is a list of (x,y,w,h) tuples for each face to be replaced
Project: HappyNet    Author: danduncan    | project source | file source
def saveSingleImage(frame,file):
  # Save cropped image. Can also rescale cropbox
  im = Image.fromarray(toggleRGB(frame))
  im.save(file)

# Crop and save image, including adding jitter
Project: CNNs-Speech-Music-Discrimination    Author: MikeMpapa    | project source | file source
def createSpectrogramFile(x, Fs, fileName, stWin, stStep):
        specgramOr, TimeAxis, FreqAxis = aF.stSpectogram(x, Fs, round(Fs * stWin), round(Fs * stStep), False)            
        print specgramOr.shape
        if inputs[2]=='full':
            print specgramOr
            numpy.save(fileName.replace('.png','')+'_spectrogram', specgramOr)
        else:   
            #specgram = scipy.misc.imresize(specgramOr, float(227.0) / float(specgramOr.shape[0]), interp='bilinear')                        
            specgram = cv2.resize(specgramOr,(227, 227), interpolation = cv2.INTER_LINEAR)
            im1 = Image.fromarray(numpy.uint8(matplotlib.cm.jet(specgram)*255))
            scipy.misc.imsave(fileName, im1)
Project: CNNs-Speech-Music-Discrimination    Author: MikeMpapa    | project source | file source
def mtCNN_classification(signal, Fs, mtWin, mtStep, RGB_singleFrame_net, SOUND_mean_RGB, transformer_RGB, classNamesCNN):
    mtWin2 = int(mtWin * Fs)
    mtStep2 = int(mtStep * Fs)
    stWin = 0.020
    stStep = 0.015    
    N = len(signal)
    curPos = 0
    count = 0
    fileNames = []
    flagsInd = []
    Ps = []
    randomString = (''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(5)))
    while (curPos < N):                 # for each mid-term segment
        N1 = curPos
        N2 = curPos + mtWin2 + stStep*Fs
        if N2 > N:
            N2 = N
        xtemp = signal[int(N1):int(N2)]                # get mid-term segment        

        specgram, TimeAxis, FreqAxis = aF.stSpectogram(xtemp, Fs, round(Fs * stWin), round(Fs * stStep), False)     # compute spectrogram
        if specgram.shape[0] != specgram.shape[1]:                                                                  # TODO (this must be dynamic!)
            break
        specgram = scipy.misc.imresize(specgram, float(227.0) / float(specgram.shape[0]), interp='bilinear')        # resize to 227 x 227

        imSpec = Image.fromarray(np.uint8(matplotlib.cm.jet(specgram)*255))                                         # create image
        curFileName = randomString + "temp_{0:d}.png".format(count)
        fileNames.append(curFileName)    
        scipy.misc.imsave(curFileName, imSpec)

        T1 = time.time()
        output_classes, outputP = singleFrame_classify_video(curFileName, RGB_singleFrame_net, transformer_RGB, False, classNamesCNN)        
        T2 = time.time()
        #print T2 - T1
        flagsInd.append(classNamesCNN.index(output_classes[0]))        
        Ps.append(outputP[0])
        #print flagsInd[-1]
        curPos += mtStep2               
        count += 1              
    return np.array(flagsInd), classNamesCNN, np.array(Ps)
Project: text-renderer    Author: cjnolet    | project source | file source
def resize_image(im, r=None, newh=None, neww=None, filtering=Image.BILINEAR):
    dt = im.dtype
    I = Image.fromarray(im)
    if r is not None:
        h = im.shape[0]
        w = im.shape[1]
        newh = int(round(r*h))
        neww = int(round(r*w))
    if neww is None:
        neww = int(newh*im.shape[1]/float(im.shape[0]))
    if newh > im.shape[0]:
        I = I.resize([neww, newh], Image.ANTIALIAS)
    else:
        I.thumbnail([neww, newh], filtering)
    return n.array(I).astype(dt)
Project: text-renderer    Author: cjnolet    | project source | file source
def apply_perspective_arr(self, arr, affstate, perstate, filtering=Image.BICUBIC):
        img = Image.fromarray(arr)
        img = img.transform(img.size, self.affinestate.proj_type,
            affstate,
            filtering)
        img = img.transform(img.size, self.perspectivestate.proj_type,
            perstate,
            filtering)
        arr = n.array(img)
        return arr
Project: VAE_tensorflow    Author: int8    | project source | file source
def resize_hdf5(input_filename, output_filename, dataset_name, batch_size, new_width = 64, new_height = 64):

    with h5py.File(input_filename, 'r') as hf:
        data = np.array(hf.get(dataset_name))
    nr_of_points = data.shape[0]
    depth = data.shape[3]
    newdata = np.empty(shape=(nr_of_points, new_width, new_height, depth), dtype=np.uint8)

    for i in xrange(nr_of_points):
        datum = data[i,:,:,:]
        resized_datum = np.asarray(Image.fromarray((datum * 255).astype(np.uint8), 'RGB').resize((new_width,new_height), PIL.Image.ANTIALIAS))
        newdata[i,:,:,:] = resized_datum

    with h5py.File(output_filename, 'w') as hf:
        hf.create_dataset(dataset_name, data = newdata)
Project: VAE_tensorflow    Author: int8    | project source | file source
def read_data_from_dir(dir_path, resize_to):
    files = os.listdir(dir_path)
    nr_of_images = len(files)
    data = {
        'files': ['' for _ in xrange(nr_of_images)],
        'tensors': np.empty(shape=(nr_of_images, resize_to[0], resize_to[1], resize_to[2]), dtype=np.uint8)
    }
    i = 0
    for f in files:
        datum = imread(dir_path + '/' + f)
        datum = np.asarray(Image.fromarray((datum), 'RGB').resize((resize_to[0],resize_to[1]), PIL.Image.ANTIALIAS))
        data['tensors'][i, :, :, :] = datum
        data['files'][i] = f
        i = i + 1
    return data
Project: slam-tutorial-code    Author: tuongngoc    | project source | file source
def set_background(self, np_array, color = False):
        """Takes a (numpy) array and sets this as background."""
        if color:
            img = Image.fromarray(np.flipud(np.uint8(np_array)), mode="RGB")
        else:
            img = Image.fromarray(np_array)
        if self.background_image_id:
            self.world_canvas.delete(self.background_image_id)
        img = img.resize(self.extents, Image.NEAREST)
        self.background_image = ImageTk.PhotoImage(img)
        self.background_id = self.world_canvas.create_image(0, 0,
            image=self.background_image, anchor=NW, tag="background")
        # Make sure drawing order is correct.
        self.set_display_order()
Project: pythonista-scripts    Author: khilnani    | project source | file source
def crop_image(cls, img):
        image_data = numpy.asarray(img)
        image_data_bw = image_data.max(axis=2)
        non_empty_columns = numpy.where(image_data_bw.max(axis=0)>0)[0]
        non_empty_rows    = numpy.where(image_data_bw.max(axis=1)>0)[0]
        crop_box = (min(non_empty_rows),    max(non_empty_rows),
                    min(non_empty_columns), max(non_empty_columns))
        image_data_new = image_data[crop_box[0]:crop_box[1]+1,
                                    crop_box[2]:crop_box[3]+1, :]
        img = Image.fromarray(image_data_new)
        return img
Project: doublecnn    Author: Shuangfei    | project source | file source
def save_images(X, file_name, image_shape=(28, 28), tile_shape=(10, 10), color=False):
    if color:
        img_size = numpy.prod(image_shape)
        X = (X[:, :img_size], X[:, img_size:2*img_size], X[:, 2*img_size:], None)
    image = Image.fromarray(
        tile_raster_images(X=X,
                           img_shape=image_shape,
                           tile_shape=tile_shape,
                           tile_spacing=(1, 1))
    )
    image.save(file_name)
Project: emotion-conv-net    Author: GautamShine    | project source | file source
def faceCrop(targetDir, imgList, color, single_face):
    # Load list of Haar cascades for faces
    faceCascades = load_cascades()

    # Iterate through images
    face_list = []
    for img in imgList:
        if os.path.isdir(img):
            continue
        pil_img = Image.open(img)
        if color:
            cv_img  = cv.cvtColor(np.array(pil_img), cv.COLOR_RGB2BGR)
        else:
            cv_img = np.array(pil_img)
            # Convert to grayscale if this image is actually color
            if cv_img.ndim == 3:
                cv_img = cv.cvtColor(np.array(pil_img), cv.COLOR_BGR2GRAY)

        # Detect all faces in this image
        scaled_img, faces = DetectFace(cv_img, color, faceCascades, single_face, second_pass=False, draw_rects=False)

        # Iterate through faces
        n=1
        for face in faces:
            cropped_cv_img = imgCrop(scaled_img, face, scale=1.0)
            if color:
                cropped_cv_img = rgb(cropped_cv_img)
            fname, ext = os.path.splitext(img)
            cropped_pil_img = Image.fromarray(cropped_cv_img)
            #save_name = loc + '/cropped/' + fname.split('/')[-1] + '_crop' + str(n) + ext
            save_name = targetDir + '/' + fname.split('/')[-1] + '_crop' + str(n) + ext
            cropped_pil_img.save(save_name)
            face_list.append(save_name)
            n += 1

    return face_list

# Add an emoji to an image at a specified point and size
# Inputs: img, emoji are ndarrays of WxHx3
#         faces is a list of (x,y,w,h) tuples for each face to be replaced
项目:emotion-conv-net    作者:GautamShine    | 项目源码 | 文件源码
def saveSingleImage(frame,file):
  # Save cropped image. Can also rescale cropbox
  im = Image.fromarray(toggleRGB(frame))
  im.save(file)

# Crop and save image, including adding jitter
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad    | project source | file source
def run(self):
        with self.output().open('w') as out_file:
            print "non matching"
            #validation_counter =0
        #testing_counter =0
            for i in range(1, 1569):
                if (i < 10):
                    key = "000" + str(i)
                elif (i < 100):
                    key = "00" + str(i)
                elif (i < 1000):
                    key = "0" + str(i)
                else:
                    key = str(i)
                shortlisted_file_names = [filename for filename in self.files if filename[:4] == key]
                if shortlisted_file_names:
                    shortlisted = list(set(self.files) - set(shortlisted_file_names))
                    validation_counter = 0
                    testing_counter = 0
                    for j in range(0, 54):
                        img1_key = random.choice(shortlisted_file_names)
                        img1 = plt.imread(os.path.join(config.input_dir, img1_key))
                        img1 = (img1 * 255).round().astype(np.uint8)
                        img1 = imresize(img1, (28, 64))
                        img2_key = random.choice(shortlisted)
                        shortlisted = list(set(shortlisted) - set(img2_key))
                        img2 = plt.imread(os.path.join(config.input_dir, img2_key))
                        name = "mis_match" + str(img1_key[:10]) + "_" + str(img2_key[:10]) + ".png"
                        img2 = (img2 * 255).round().astype(np.uint8)
                        img2 = imresize(img2, (28, 64))
                        img = np.vstack((img1, img2))
                        #img = img1 & img2
                        img = Image.fromarray(img)
                        if (validation_counter <5):
                            validation_counter += 1
                            img.save(os.path.join(self.dirs[2], name))
                        elif(testing_counter < 5):
                            testing_counter += 1
                            img.save(os.path.join(self.dirs[1] , name))
                        else:
                            img.save(os.path.join(self.dirs[0], name))
            out_file.write("Status : done")
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad    | project source | file source
def non_match_images(input_dir , dirs ,files):
    split_counter = 0
    for i in range(1, 1569):
        if(i < 10):
            key = "000" + str(i)
        elif (i < 100):
            key = "00" + str(i)
        elif (i <1000):
            key = "0" + str(i)
        else :
            key = str(i)
        shortlisted_file_names = [ filename for filename in files if filename[:4] == key]
        if shortlisted_file_names:
            shortlisted = list(set(files) - set(shortlisted_file_names))
            for j in range(0,54):
                img1_key = random.choice(shortlisted_file_names)
                img1 = plt.imread(os.path.join(input_dir, img1_key))
                img1 = (img1 * 255).round().astype(np.uint8)
                img1 = imresize(img1, (64, 64))
                img2_key = random.choice(shortlisted)
                shortlisted = list(set(shortlisted) - set(img2_key))
                img2 = plt.imread(os.path.join(input_dir, img2_key))
                name = "mis_match" + str(img1_key[:10]) + "_" + str(img2_key[:10]) + ".png"
                img2 = (img2 * 255).round().astype(np.uint8)
                img2 = imresize(img2, (64, 64))
                img = np.vstack((img1, img2))
                img = Image.fromarray(img)
                if(split_counter < 8000):
                    split_counter+=1
                    img.save(os.path.join(dirs[1], name))
                else:
                    img.save(os.path.join(dirs[0], name))
Project: OptiCalcRead    Author: MrLeylo    | project source | file source
def draw(coordinates):
    #Scan the matrix and render the image
    fig=plt.figure(1)
    fig.canvas.set_window_title('Input')
    for i in range(coordinates.shape[0]):
        if[0,0] in coordinates[i]:
            indexos=np.where(coordinates[i]==[0,0])
            for j in range(len(indexos[1])):
                for k in range(2):
                    coordinates[i,indexos[0][j],indexos[1]]=coordinates[i,indexos[0][0]-1,indexos[1]]
        lineP,=plt.plot(coordinates[i,range(coordinates.shape[1]),0],-coordinates[i,range(coordinates.shape[1]),1],'-')
    #Find the maximum x and y of the full image by checking every coordinate in the matrix
    print 'Initializing segmentation..........'
    listcoordinates=coordinates.tolist()
    x=[]
    y=[]
    for j in range(len(listcoordinates)):
        xb, yb = [i[0] for i in listcoordinates[j]], [i[1] for i in listcoordinates[j]]
        x.extend(xb)
        y.extend(yb)
    difX, difY = max(x)-min(x), max(y)-min(y)
    #Build and fill the normalized image
    imatge = np.zeros([50,500])
    for i in range(len(x)):
        imatge[(50*(y[i]-min(y))/difY)-1, (500*(x[i]-min(x))/difX)-1] = 1
    img=Image.fromarray(imatge*255)
    return img,[x,y],[difX,difY]
Project: Vehicle_ReID    Author: starimpact    | project source | file source
def drawText(cimg, txt, posxy, fz):
    ttFont0 = ImageFont.truetype(fontfile, fz)
    im = Image.fromarray(cimg, 'RGB')
    drawable = ImageDraw.Draw(im)
    drawable.text ((posxy[0], posxy[1]), txt, fill=(0, 255, 0), font=ttFont0)
    npimg = np.asarray(im)

    return npimg
Project: Vehicle_ReID    Author: starimpact    | project source | file source
def drawText_Color(cimg, txt, posxy, fz, color):
    ttFont0 = ImageFont.truetype(fontfile, fz)
    im = Image.fromarray(cimg, 'RGB')
    drawable = ImageDraw.Draw(im)
    drawable.text((posxy[0], posxy[1]), txt, fill=color, font=ttFont0)
    npimg = np.asarray(im)

    return npimg
Project: Vehicle_ReID    Author: starimpact    | project source | file source
def drawText_BKG(cimg, txt, posxy, fz, bkglen):
    ttFont0 = ImageFont.truetype(fontfile, fz)
    im = Image.fromarray(cimg, 'RGB')
    drawable = ImageDraw.Draw(im)
    drawable.polygon(((posxy[0], posxy[1]), \
                      (posxy[0]+bkglen, posxy[1]), \
                      (posxy[0]+bkglen, posxy[1]+fz), \
                      (posxy[0], posxy[1]+fz)), fill=(255, 255, 255))
    drawable.text ((posxy[0], posxy[1]), txt, fill=(0, 0, 255), font=ttFont0)
    npimg = np.asarray(im)

    return npimg
Project: SLAM    Author: jfjensen    | project source | file source
def set_background(self, np_array, color = False):
        """Takes a (numpy) array and sets this as background."""
        if color:
            img = Image.fromarray(np.flipud(np.uint8(np_array)), mode="RGB")
        else:
            img = Image.fromarray(np_array)
        if self.background_image_id:
            self.world_canvas.delete(self.background_image_id)
        img = img.resize(self.extents, Image.NEAREST)
        self.background_image = ImageTk.PhotoImage(img)
        self.background_id = self.world_canvas.create_image(0, 0,
            image=self.background_image, anchor=NW, tag="background")
        # Make sure drawing order is correct.
        self.set_display_order()
Project: PiStorms    Author: mindsensors    | project source | file source
def fillImgArray(self, x, y, width, height, image, display = True):
        buff = self.disp.buffer
        actx = self.screenXFromImageCoords(x,y)
        acty = self.screenYFromImageCoords(x,y)

        image = Image.fromarray(image)

        image = image.resize((width,height), Image.ANTIALIAS)

        cr = self.currentRotation
        if(cr == 1):
            actx -= height
            image = image.transpose(Image.ROTATE_270)
        if(cr == 2):
            acty -= height
            actx -= width
            image = image.transpose(Image.ROTATE_180)
        if(cr == 3):
            acty -= width
            image = image.transpose(Image.ROTATE_90)

        buff.paste(image, (actx,acty))
        if(display):
            self.disp.display()

    ## Rotates the screen orientation 90 degrees to the right (-90 degrees)
    #  @param self The object pointer.
    #  @remark
    #  To use this function in your program:
    #  @code
    #  ...
    #  screen.rotateRight()
    #  @endcode
Project: mlstudy_week7    Author: ByungKeon-Ko    | project source | file source
def create_ns (tmp_imgpath, cnt_ns ) :
    global pyramids

    tmp_img = Image.open("%s/%s" %(coco_path, tmp_imgpath), 'r' )
    pyramids = list( pyramid_gaussian( tmp_img, downscale=math.sqrt(2) ) )

    for i in range ( len(pyramids) ):
        if min( pyramids[i].shape[0], pyramids[i].shape[1] ) < MinFace :
            del pyramids[i:]
            break

    # for j in range(4) :
    for j in range(36) :
        # creating random index
        img_index = random.randint(0, len(pyramids)-1 )
        tmp_patch_num = ( pyramids[img_index].shape[0] - 12 + 1) * ( pyramids[img_index].shape[1] - 12 + 1)
        rand_index = random.randint(0, tmp_patch_num)

        # x, y position decoding
        row_max = pyramids[img_index].shape[0]
        col_max = pyramids[img_index].shape[1]
        row = 0
        col = rand_index

        while ( col >= col_max - 12 +1 ) :
            row = row + 1
            col = col - (col_max-12+1)

        flag = 0
        # Rejecting Black and White image
        tmp_ns = pyramids[img_index][row:row+12, col:col+12]
        if not len(tmp_ns.shape)==3 :
            print " Gray Image. Skip "
            return 0

        # Rejecting Positive Samples
        scale_factor = math.sqrt(2)**img_index

        tmp_ns = pyramids[img_index][row:row+12, col:col+12]
        tmp_ns = Image.fromarray((tmp_ns*255.0).astype(np.uint8) )
        # tmp_ns = tmp_ns.resize( (12,12), Image.BICUBIC )
        tmp_ns = tmp_ns.resize( (12,12), Image.BILINEAR )
        tmp_ns.save("%s/ns-%s.jpg" %(ns_path, cnt_ns+j) )

    return 1

# -----------------------------------------
Project: kaggle_art    Author: small-yellow-duck    | project source | file source
def preprocess(X):
    #no preprocessing - just rescale the pixel values to the interval [0.0, 1.0]
    #return X / 255.0

    #this preprocessor crops one pixel along each of the sides of the images
    #this is a teeny tiny improvement on the "no preprocessing" option
    #return X[:, :, 1:-1, 1:-1] / 255.0 

    #this preprocessor adds pixels along the bottom and side of the images
    #t = np.zeros((X.shape[0], X.shape[1], 36, 36))
    #t[:, :, 0:X.shape[2], 0:X.shape[3]] = X/255.0
    #return t


    #if data is in training set, then the chunk size is 1.
    #if data is in training set, randomly scale the image size up or down   
    if X.shape[0] == 1:
        #randomly scale the size of the image up or down
        ns = np.random.randint(25, 33)

        #Python Image Library expects arrays of format [width, height, 3] or [width, height]
        #theano/keras expects images of format [colours, width, height]
        if X.shape[1] == 3:
            im = Image.fromarray(np.rollaxis(X[0, :, :, :], 0, 3).astype(np.uint8))
            im.thumbnail((ns, ns),Image.ANTIALIAS)
            X = np.rollaxis(np.array(im), 2,0).reshape((1,-1, im.size[0], im.size[1]))

        if X.shape[1] == 1:
            im = Image.fromarray(X[0, 0, :, :].astype(np.uint8))    
            im.thumbnail((ns, ns),Image.ANTIALIAS)
            X = np.array(im).reshape((1,-1, im.size[0], im.size[1]))


    #print(X.shape)
    #pad with greyscale checkerboard    
    t = 0.2*np.ones((X.shape[1], 4,4))
    t[:, 0:2, 0:2] = 0.1*np.ones((X.shape[1], 2,2)) 
    t[:, 2:4, 2:4] = 0.1*np.ones((X.shape[1], 2,2)) 
    t = np.tile(t, (1, 9, 9))
    t = np.tile(t.reshape((1,t.shape[0], t.shape[1], t.shape[2])), (X.shape[0], 1, 1, 1))

    #padding only one side and the bottom means that the training loss -> nan after a few
    #epochs because there is never any information in these regions!
    #t = np.zeros((X.shape[1], 4,4))
    #i = np.random.randint(0, 4*9-X.shape[2])
    #j = np.random.randint(0, 4*9-X.shape[3])
    #t[:, :, i : i+X.shape[2], j:j+X.shape[3]] = X/255.0

    return t
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad    | project source | file source
def run(self):
        with self.output().open('w') as out_file:
            index = 0
            while( index < len(self.files_1) - 1):
                #print index
                matches_counter = 1
                person_images = []
                person_images.append(self.files_1[index])
                flag = True
                while(flag):
                        next_index = index + matches_counter
                        #print next_index
                        if (next_index < len(self.files_1)) and (self.files_1[index][:4] == self.files_1[next_index][:4]):
                                person_images.append(self.files_1[next_index])
                                matches_counter+=1
                        else:
                                flag = False
                #print person_images
                if(len(person_images) > 1):
                        split_count_test = 0
                        split_count_valid = 0
                        for i in range(0 , len(person_images)):
                                img1 = plt.imread(os.path.join(config.input_dir , person_images[i]))
                                img1 = (img1*255).round().astype(np.uint8)
                                img1 = imresize(img1, (28 , 64))
                                for j in range(i+1,len(person_images)):
                                        name = 'match' + str(person_images[i][:10]) + "_"+ str(person_images[j][:10]) + ".png"
                                        img2 = plt.imread(os.path.join(config.input_dir , person_images[j]))
                                        img2 = (img2*255).round().astype(np.uint8)
                                        img2 = imresize(img2 , (28,64))
                                        img = np.vstack((img1 , img2))
                                        img = Image.fromarray(img)
                                        if(split_count_valid < int(0.2*(len(person_images)))):
                                                split_count_valid+=1
                                                img.save(os.path.join(self.dirs_1[2],name))
                                        elif(split_count_test < int(0.2*(len(person_images)))):
                                                split_count_test+=1
                                                img.save(os.path.join(self.dirs_1[1],name))
                                        else:
                                                img.save(os.path.join(self.dirs_1[0],name))


                index = next_index

            out_file.write("Status : done")
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad    | project source | file source
def run(self):
        with self.output().open('w') as out_file:
            index = 0
            while( index < len(self.files_1) - 1):
                #print index
                matches_counter = 1
                person_images = []
                #print "------------------------"
                #print self.files_1[index]
                person_images.append(self.files_1[index])
                flag = True
                while(flag):
                    next_index = index + matches_counter
                    #print next_index
                    if (next_index < len(self.files_1)) and (self.files_1[index][:4] == self.files_1[next_index][:4]):
                        #print self.files_1[next_index]
                        person_images.append(self.files_1[next_index])
                        matches_counter+=1
                    else:
                        flag = False
                permutations = 0
                for p in range(1 , len(person_images)-1):
                    permutations+=p
                #print "PERMUTATIONS" , permutations    
                if(len(person_images) > 1):
                    split_count_test = 0
                    split_count_valid = 0
                    for i in range(0 , len(person_images)):
                        img1 = plt.imread(os.path.join(config.input_dir , person_images[i]))
                        img1 = (img1*255).round().astype(np.uint8)
                        img1 = imresize(img1, (32 , 64))
                        for j in range(i+1,len(person_images)):
                            #print j
                            name = 'match' + str(person_images[i][:10]) + "_"+ str(person_images[j][:10]) + ".png"
                            img2 = plt.imread(os.path.join(config.input_dir , person_images[j]))
                            img2 = (img2*255).round().astype(np.uint8)
                            img2 = imresize(img2 , (32,64))
                            #img = img1 & img2
                            img = np.vstack((img1, img2))
                            img = Image.fromarray(img) 
                            #print("PERSON IMAGES  :  ",len(person_images))
                            #print("SPLITTING IMAGES : ",int(0.2*(len(person_images))))
                            if(split_count_valid < int(0.1*(permutations))):
                                split_count_valid+=1
                                img.save(os.path.join(self.dirs_1[2],name))
                            elif(split_count_test < int(0.1*(permutations))):
                                split_count_test+=1
                                img.save(os.path.join(self.dirs_1[1],name))
                            else:
                                img.save(os.path.join(self.dirs_1[0],name))


                index = next_index

            out_file.write("Status : done")