Python cv2 module: imwrite() example source code

The following 50 code examples, extracted from open-source Python projects, show how to use cv2.imwrite().

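As a quick orientation before the project excerpts, here is a minimal, self-contained sketch of the basic call (the file name and test array below are illustrative and not taken from any of the listed projects). The points worth remembering: cv2.imwrite() picks the output format from the file extension, expects color arrays in BGR channel order, and returns a boolean instead of raising when the write fails (for example, when the target directory does not exist).

import os

import cv2
import numpy as np

# Illustrative output path; imwrite() does not create missing directories.
out_path = 'output/gradient.png'
os.makedirs(os.path.dirname(out_path), exist_ok=True)

# Build a small test image; OpenCV expects BGR channel order, not RGB.
img = np.zeros((64, 64, 3), dtype=np.uint8)
img[:, :, 2] = np.linspace(0, 255, 64, dtype=np.uint8)  # red gradient

# The format is inferred from the '.png' extension; True is returned on success.
ok = cv2.imwrite(out_path, img)
if not ok:
    raise IOError('Failed to write {}'.format(out_path))
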
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | Project source | File source
def crop_and_store(frame, mouth_coordinates, name):
    """
    Args:
        1. frame:               The frame which has to be cropped.
        2. mouth_coordinates:   The coordinates which help in deciding which region is to be cropped.
        3. name:                The path name to be used for storing the cropped image.
    """

    # Find bounding rectangle for mouth coordinates
    x, y, w, h = cv2.boundingRect(mouth_coordinates)

    mouth_roi = frame[y:y + h, x:x + w]

    h, w, channels = mouth_roi.shape
    # If the cropped region is very small, ignore this case.
    if h < 10 or w < 10:
        return

    resized = resize(mouth_roi, 32, 32)
    cv2.imwrite(name, resized)
Project: facial_emotion_recognition    Author: adamaulia    | Project source | File source
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)

    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2,5)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0)
Project: Deep360Pilot-optical-flow    Author: yenchenlin    | Project source | File source
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison if we have matlab version image
Project: Mini-Projects    Author: gaborvecsei    | Project source | File source
def CaptureImage():
    imageName = 'DontCare.jpg' #Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #For capture image in monochrome
        rgbImage = frame #For capture the image in RGB color space

        # Display the resulting frame
        cv2.imshow('Webcam',rgbImage)
        #Wait to press 'q' key for capturing
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            #Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    #Returns the captured image's name
    return imageName
Project: blind-watermark    Author: linyacool    | Project source | File source
def encode(img_path, wm_path, res_path, alpha):
    img = cv2.imread(img_path)
    img_f = np.fft.fft2(img)
    height, width, channel = np.shape(img)
    watermark = cv2.imread(wm_path)
    wm_height, wm_width = watermark.shape[0], watermark.shape[1]
    x, y = range(height / 2), range(width)
    random.seed(height + width)
    random.shuffle(x)
    random.shuffle(y)
    tmp = np.zeros(img.shape)
    for i in range(height / 2):
        for j in range(width):
            if x[i] < wm_height and y[j] < wm_width:
                tmp[i][j] = watermark[x[i]][y[j]]
                tmp[height - 1 - i][width - 1 - j] = tmp[i][j]
    res_f = img_f + alpha * tmp
    res = np.fft.ifft2(res_f)
    res = np.real(res)
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
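
The call above passes a third argument to cv2.imwrite(): a flat list of encoder flag/value pairs. As a small illustrative aside (the dummy array and file names below are placeholders, not part of the blind-watermark project), the two flags seen most often in the examples on this page are JPEG quality and PNG compression level:

import cv2
import numpy as np

img = np.full((100, 100, 3), 127, dtype=np.uint8)  # dummy mid-gray image

# JPEG quality ranges from 0 to 100; higher means better quality and a larger file.
cv2.imwrite('quality_100.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

# PNG compression level ranges from 0 to 9; higher means a smaller file but a slower write.
cv2.imwrite('compression_9.png', img, [int(cv2.IMWRITE_PNG_COMPRESSION), 9])
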
Project: bib-tagger    Author: KateRita    | Project source | File source
def find_lines(img):
  edges = cv2.Canny(img,100,200)
  threshold = 60
  minLineLength = 10
  lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold, 0, minLineLength, 20);
  if (lines is None or len(lines) == 0):
      return

  #print lines
  for line in lines[0]:
    #print line
    cv2.line(img, (line[0],line[1]), (line[2],line[3]), (0,255,0), 2)
  cv2.imwrite("line_edges.jpg", edges)
  cv2.imwrite("lines.jpg", img)
Project: lsun_2017    Author: ternaus    | Project source | File source
def downscale(old_file_name):
    img = cv2.imread(os.path.join(old_file_name))

    new_file_name = (old_file_name
                     .replace('training', 'training_' + str(min_size))
                     .replace('validation', 'validation_' + str(min_size))
                     .replace('testing', 'testing_' + str(min_size))
                     )

    height, width, _ = img.shape

    if width > height:
        new_width = int(1.0 * width / height * min_size)
        new_height = min_size

    else:
        new_height = int(1.0 * height / width * min_size)
        new_width = min_size

    img_new = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(new_file_name, img_new)
Project: mx-rfcn    Author: giorking    | Project source | File source
def save_all_detection(im_array, detections, imdb_classes=None, thresh=0.7):
    """
    save all detections in one image as result.jpg
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param imdb_classes: list of names in imdb
    :param thresh: threshold for valid detections
    :return:
    """
    import random
    im = image_processing.transform_inverse(im_array, config.PIXEL_MEANS)
    im = im[:, :, ::-1].copy()  # back to b,g,r
    for j in range(1, len(imdb_classes)):
        color = (255*random.random(), 255*random.random(), 255*random.random())  # generate a random color
        dets = detections[j]
        for i in range(dets.shape[0]):
            bbox = dets[i, :4]
            score = dets[i, -1]
            if score > thresh:
                cv2.rectangle(im, (int(round(bbox[0])), int(round(bbox[1]))), 
                                (int(round(bbox[2])), int(round(bbox[3]))), color, 2)
                cv2.putText(im, '%s' % imdb_classes[j],
                            (int(round(bbox[0])), int(round(bbox[1]))),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
    cv2.imwrite("result.jpg", im)
Project: MusicGenerator    Author: Conchylicultor    | Project source | File source
def write_song(piano_roll, filename):
        """ Save the song on disk
        Args:
            piano_roll (np.array): a song object containing the tracks and melody
            filename (str): the path were to save the song (don't add the file extension)
        """
        note_played = piano_roll > 0.5
        piano_roll_int = np.uint8(piano_roll*255)

        b = piano_roll_int * (~note_played).astype(np.uint8)  # Note silenced
        g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
        r = piano_roll_int * note_played.astype(np.uint8)     # Notes played

        img = cv.merge((b, g, r))

        # TODO: We could insert a first column indicating the piano keys (black/white key)

        cv.imwrite(filename + '.png', img)
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def on_mouse(event, x, y, flags, params):
    # global img
    t = time()

    if event == cv2.EVENT_LBUTTONDOWN:
        print 'Start Mouse Position: '+str(x)+', '+str(y)
        sbox = [x, y]
        boxes.append(sbox)
             # print count
             # print sbox

    elif event == cv2.EVENT_LBUTTONUP:
        print 'End Mouse Position: '+str(x)+', '+str(y)
        ebox = [x, y]
        boxes.append(ebox)
        print boxes
        crop = img[boxes[-2][1]:boxes[-1][1],boxes[-2][0]:boxes[-1][0]]

        cv2.imshow('crop',crop)
        k =  cv2.waitKey(0)
        if ord('r')== k:
            cv2.imwrite('Crop'+str(t)+'.jpg',crop)
            print "Written to file"
Project: bib-tagger    Author: KateRita    | Project source | File source
def find_bibs(image):
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
  binary = cv2.GaussianBlur(gray,(5,5),0)
  ret,binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
  #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY);

  #lapl = cv2.Laplacian(image,cv2.CV_64F)
  #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY);
  #blurred = cv2.GaussianBlur(lapl,(5,5),0)
  #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #cv2.imwrite("lapl.jpg", lapl)

  edges = cv2.Canny(image,175,200)
  cv2.imwrite("edges.jpg", edges)
  binary = edges

  cv2.imwrite("binary.jpg", binary)
  contours,hierarchy = find_contours(binary)

  return get_rectangles(contours)
Project: bib-tagger    Author: KateRita    | Project source | File source
def test_featuredetector(self):

        print self.photodir

        #read in images
        images = []
        for i in np.arange(1,7):
            images.append(cv2.imread(os.path.join(self.photodir,"Frosty5k","{}.jpg".format(i))))

        #read in bib
        bib = cv2.imread(os.path.join(self.photodir,"Frosty5k","bib.jpg"))

        for i in np.arange(1,7):
            image = images[i-1]
            bib_kp, image_kp, matches = fd.findMatchesBetweenImages(bib, image)
            output = fd.drawMatches(bib, bib_kp, image, image_kp, matches)

            ftoutdir = os.path.join(self.photooutdir,"features")
            print "Writing images to folder {}".format(ftoutdir)

            if not os.path.exists(ftoutdir):
                os.makedirs(ftoutdir)

            cv2.imwrite(os.path.join(ftoutdir,"{}matches.jpg".format(i)), output)
Project: ATX    Author: NetEaseGame    | Project source | File source
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print res
    print res.shape
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print cv2.minMaxLoc(res)
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
Project: pybot    Author: spillai    | Project source | File source
def imshow_cv(label, im, block=False, text=None, wait=2): 
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True: 
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'): 
                print('Exiting debug mode!')
                break
    if ch == ord('s'):
        fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
        print('Saving %s' % fn)
        cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1)
Project: chainer-gan-experiments    Author: Aixile    | Project source | File source
def save_images_grid(imgs, path, grid_w=4, grid_h=4, post_processing=postprocessing_tanh, transposed=False):
    imgs = copy_to_cpu(imgs)
    if post_processing is not None:
        imgs = post_processing(imgs)
    b, ch, w, h = imgs.shape
    assert b == grid_w*grid_h

    imgs = imgs.reshape((grid_w, grid_h, ch, w, h))
    imgs = imgs.transpose(0, 1, 3, 4, 2)
    if transposed:
        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(1, 2, 0, 3, 4).reshape((grid_h*w, grid_w*h, ch))
    else:
        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(0, 2, 1, 3, 4).reshape((grid_w*w, grid_h*h, ch))
    if ch==1:
        imgs = imgs.reshape((grid_w*w, grid_h*h))
    cv2.imwrite(path, imgs)
Project: DmsMsgRcg    Author: bshao001    | Project source | File source
def s1_predict(config_file, model_dir, model_file, predict_file_list, out_dir):
    """
    This function serves as a test/validation tool during the model development. It is not used as
    a final product in part of the pipeline.
    """
    with open(config_file) as config_buffer:
        config = json.loads(config_buffer.read())

    with tf.Graph().as_default() as graph:
        converted_model = ConvertedModel(config, graph, 's1_keras', model_dir, model_file)

    with tf.Session(graph=graph) as sess:
        for img_file in predict_file_list:
            image = cv2.imread(img_file)
            boxes = converted_model.predict(sess, image)
            image = draw_boxes(image, boxes)

            _, filename = os.path.split(img_file)
            cv2.imwrite(os.path.join(out_dir, filename), image)
Project: SudokuVisionSolver    Author: tusharsircar95    | Project source | File source
def getTestImage(img,size): 
    size = int(size)
    (x,y) = np.shape(img)
    left,right,bottom,top = x,0,y,0
    count = 0
    for i in range(x):
        for j in range(y):
            if img[i][j] == 255:
                left = min(left,i)
                right = max(right,i)
                top = max(top,j)
                bottom = min(bottom,j)

                count = count + 1
    if count == 0:
        return img

    img = img[left:right,bottom:top]
    cv2.imwrite('template.jpg',img)
    return img

# Divides the grid into 9x9 = 81 cells and does OCR on each after processing it
Project: specularity-removal    Author: gmichaeljaison    | Project source | File source
def remove_specularity(img_files):
    """
    Removes highlights/specularity in Images from multiple view points

    :param img_files: File names of input images in horizontal order (important)
    """
    # read images from file names
    imgs = read_images(img_files)

    # solve each pair of image.
    # assumption: Input images are in order
    for i in range(len(imgs) - 1):
        logging.debug('processing images {} and {}'.format(i+1, i+2))
        imgs[i], imgs[i+1] = _solve(imgs[i], imgs[i + 1])

    for i, path in enumerate(img_files):
        fname = os.path.basename(path)
        res_file = os.path.join(RESULTS_DIR, fname)

        logging.info('saving the results in {}'.format(res_file))
        cv.imwrite(res_file, imgs[i])
Project: sail    Author: GemHunt    | Project source | File source
def create_composite_image_coin_id(coin_id, crop_dir, data_dir):
    images = []
    images_gif = []

    for id in range(0,56):
        image_id = coin_id * 100 + id
        crop = ci.get_rotated_crop(crop_dir, image_id, 56, 0)
        images.append(crop)
        filename =  ci.get_filename_from(image_id,crop_dir)
        images_gif.append(imageio.imread(filename))

    composite_image = ci.get_composite_image(images, 8, 8)
    cv2.imwrite(data_dir + str(coin_id) + '.png', composite_image)
    imageio.mimsave(data_dir + str(coin_id) + '.gif', images_gif)

    return
Project: yonkoma2data    Author: esuji5    | Project source | File source
def cutout(self, img, cut_point, img_path='trim', padding=False, extra_cut=False):
        '''Cut panels out of the image at the given cut points and save each one as "<img_path>-<n>.png".'''
        px = self.padding_x if padding else 0
        py = self.padding_y if padding else 0
        cp_x = cut_point['x']
        cp_y = cut_point['y']
        for i in range(0, len(cp_y)):
            if i % 2 == 0:
                img_cut_1_4 = img[cp_y[i] - py:cp_y[i + 1] + py, cp_x[2] - px:cp_x[3] + px]
                img_cut_5_8 = img[cp_y[i] - py:cp_y[i + 1] + py, cp_x[0] - px:cp_x[1] + px]
                if extra_cut:
                    # When extra_cut is set, trim each panel further with hybrid_cut before saving
                    img_cut_1_4 = hybrid_cut(img=img_cut_1_4, img_path='dum-{}'.format(i // 2 + 1))
                    img_cut_5_8 = hybrid_cut(img=img_cut_5_8, img_path='dum-{}'.format(i // 2 + 5))
                cv2.imwrite('{}-{}.png'.format(img_path, str(i // 2 + 1)), img_cut_1_4)
                cv2.imwrite('{}-{}.png'.format(img_path, str(i // 2 + 5)), img_cut_5_8)
Project: garden.facelock    Author: kivy-garden    | Project source | File source
def store_raw_images():
    '''To download images from image-net
        (Change the url for different needs of cascades)
    '''
    neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'
    neg_image_urls = urllib2.urlopen(neg_images_link).read().decode()

    pic_num = 1

    for i in neg_image_urls.split('\n'):
        try:

            print i
            urllib.urlretrieve(i, "neg/" + str(pic_num) + '.jpg')
            img = cv2.imread("neg/" + str(pic_num) +'.jpg',
                                cv2.IMREAD_GRAYSCALE)
            resized_image = cv2.resize(img, (100, 100))
            cv2.imwrite("neg/" + str(pic_num) + '.jpg', resized_image)
            pic_num = pic_num + 1

        except:
            print "error"
Project: PixivAvatarBot    Author: kophy    | Project source | File source
def generate_avatar(dir, filename):
    """
    Generate an avatar from the image and save it as dir/avatar_filename.
    :return: whether the avatar was generated successfully (bool)
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)));
    image = None;
    try:
        image = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2BGR);
    except:
        image = numpy.array(pil_image);
    avatar = crop_avatar(image);
    if avatar is None:
        return False;
    else:
        cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar);
        return True;
Project: DeblurGAN    Author: KupynOrest    | Project source | File source
def __plot_canvas(self, show, save):
        if len(self.result) == 0:
            raise Exception('Please run blur_image() method first.')
        else:
            plt.close()
            plt.axis('off')
            fig, axes = plt.subplots(1, len(self.result), figsize=(10, 10))
            if len(self.result) > 1:
                for i in range(len(self.result)):
                        axes[i].imshow(self.result[i])
            else:
                plt.axis('off')

                plt.imshow(self.result[0])
            if show and save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)
                plt.show()
            elif save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)
            elif show:
                plt.show()
Project: temporal-segment-networks    Author: yjxiong    | Project source | File source
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list
Project: Machine-Learning    Author: Jegathis    | Project source | File source
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: u1234x1234    | Project source | File source
def f(image_id):
#    if os.path.exists('test_poly_{}_{}/{}.png'.format(version, epoch, image_id)):
#        print(image_id)
#        return
    print('begin: {}'.format(image_id))

    p = d[image_id]
    p = [wkt.loads(x) for x in p]
    y_sf, x_sf = get_scale_factor(image_id, size, size)
    p = [affinity.scale(x, xfact=x_sf, yfact=y_sf, origin=(0, 0, 0)) for x in p]
    rst = rasterize_polgygon(p, size, size)
    color_rst = colorize_raster(rst)
    im = get_rgb_image(image_id, size, size)

    rr = np.hstack([color_rst, im])
    cv2.imwrite('test_poly_{}_{}-cv/{}.png'.format(version, epoch, image_id), rr)
    print('end: {}'.format(image_id))
Project: blind-watermark    Author: linyacool    | Project source | File source
def decode(ori_path, img_path, res_path, alpha):
    ori = cv2.imread(ori_path)
    img = cv2.imread(img_path)
    ori_f = np.fft.fft2(ori)
    img_f = np.fft.fft2(img)
    height, width = ori.shape[0], ori.shape[1]
    watermark = (ori_f - img_f) / alpha
    watermark = np.real(watermark)
    res = np.zeros(watermark.shape)
    random.seed(height + width)
    x = range(height / 2)
    y = range(width)
    random.shuffle(x)
    random.shuffle(y)
    for i in range(height / 2):
        for j in range(width):
            res[x[i]][y[j]] = watermark[i][j]
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
Project: soja_box    Author: iTaa    | Project source | File source
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    Resize an image and save the result as a JPEG.
    :param img_path: path of the source image
    :param mini_size: target size of the shorter side after resizing
    :param jpeg_quality: JPEG quality of the saved image
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    cv2.imwrite(img_name, r_image, params)
Project: Video-Classification-Action-Recognition    Author: qijiezhao    | Project source | File source
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def hdSolidBlock(fn = "redHDSolidBlock.jpg", bgr = None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions.  Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h,w,3),dtype="uint8")
    # Want to set all of the pixels to bgr tuple, default red, 8 bit colour
    if not bgr:
        bgr = [0,0,255]
    img[:,:] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img)
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def do_warp(M, warp):
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range = (0, 255))

    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]

    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)

    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height = 300))
    cv2.imshow("crop", imutils.resize(crop, height = 300))
    cv2.waitKey(0)
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def locate(self, all = False, show = False, outimg = None):
        for (transition, mask) in self.transitions:
            if transition == 1:
                sfv3 = SquareFinderV3(mask, cos_limit = 0.5)
                squares = sfv3.find(self.mode)
                if show:
                    SquaresOverlayV4(mask, squares, all = all)
                    SquaresOverlayV4(mask, squares, all = False)
                else:
                    square_contours = [square.contour for square in squares]
                    best_contours_tuples = classify_multi_monitors_contour_set(square_contours)
                    found = mask.copy()
                    self.best_contours = [contour.astype('int32') for (contour, index) in best_contours_tuples]
                    cv2.drawContours( found, self.best_contours, -1, (0,0,255),3)
                    if outimg:
                        cv2.imwrite(outimg, found)
                return self.best_contours
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def idealised_model_B():
    '''As idealised_model_A but scale the images down for rapid prototyping.'''
    rw = RegularWall(1920, 1080, 2, 2, 100, 100, 150, 150)
    blackBackground = 'data/blackHDSolidBlock.jpg'
    whiteBackground = 'data/whiteHDSolidBlock.jpg'
    yellowBackground = 'data/yellowHDSolidBlock.jpg'
    rw.wall.add_bg(yellowBackground)
    rw.wall.render(pixelColor=BLACK, pixelThickness=-1)
    pdb.set_trace()
    black = resize_to_width(rw.wall.img, 1024)
    cv2.imwrite('data/ideal_black_w1024.png', black)
    vw = ImageViewer(rw.wall.img)
    vw.windowShow()
    rw.wall.render(pixelColor=WHITE, pixelThickness=-1)
    white = resize_to_width(rw.wall.img, 1024)
    cv2.imwrite('data/ideal_white_w1024.png', white)
    vw = ImageViewer(rw.wall.img)
    vw.windowShow()
Project: image_recognition    Author: tue-robotics    | Project source | File source
def write_raw(dir_path, image):
    """
    Write an image to a file (path) with the label as subfolder
    :param dir_path: The base directory we are going to write to
    :param image: The OpenCV image
    """

    if dir_path is None:
        return False

    # Check if the path exists, otherwise create it
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # Check if the raw subdirectory exists, otherwise create it
    raw_dir = dir_path + "/raw"
    if not os.path.exists(raw_dir):
        os.makedirs(raw_dir)

    filename = "%s/%s.jpg" % (raw_dir, datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S_%f"))
    cv2.imwrite(filename, image)

    return True
Project: cv-lane    Author: kendricktan    | Project source | File source
def save_images(self, dirname='dump'):
        import os
        img_no = 1

        # Makes the directory
        if not os.path.exists('./' + dirname):
            os.mkdir(dirname)

        while True:
            self.grab_frame()

            if self.debug:
                cv2.imshow('frame', self.img)

            k = cv2.waitKey(1) & 0xFF

            if k == ord('s'):
                cv2.imwrite(os.path.join(dirname, 'dump_' + str(img_no) + '.jpg'), self.img)
                img_no += 1

            elif k == ord('q'):
                break

        cv2.destroyAllWindows()
    # Destructor
Project: imgpedia    Author: scferrada    | Project source | File source
def compute(self, image):
        directory = ''.join(random.choice(string.lowercase) for _ in range(8))
        if not os.path.exists(directory):
            os.makedirs(directory)
        image = cv2.resize(image, self.size)
        patches = []
        patches.append(image)
        patches.append(image[:self.patch_size, :self.patch_size])
        patches.append(image[32:,32:])
        patches.append(image[32:, :self.patch_size])
        patches.append(image[:self.patch_size, 32:])
        patches.append(image[16:-16, 16:-16])
        patches.append(image[16:-16, 32:])
        patches.append(image[16:-16, :self.patch_size])
        patches.append(image[32:, 16:-16])
        patches.append(image[:self.patch_size, 16:-16])

        descriptor = np.zeros((1,4096))
        for i in range(len(patches)):
            filepath = os.path.join(directory, ("%d.jpg" % i))
            cv2.imwrite(filepath, patches[i])
            descriptor = descriptor + self.compute_oversample(filepath)
        shutil.rmtree(directory)
        return descriptor/len(patches)
Project: ATX    Author: NetEaseGame    | Project source | File source
def interactive_save(image):
    img_str = cv2.imencode('.png', image)[1].tostring()
    imgpil = Image.open(StringIO(img_str))

    root = Tkinter.Tk()
    root.geometry('{}x{}'.format(400, 400))
    imgtk = ImageTk.PhotoImage(image=imgpil)
    panel = Tkinter.Label(root, image=imgtk) #.pack()
    panel.pack(side="bottom", fill="both", expand="yes")
    Tkinter.Button(root, text="Hello!").pack()
    save_to = tkSimpleDialog.askstring("Save cropped image", "Enter filename")
    if save_to:
        if save_to.find('.') == -1:
            save_to += '.png'
        print 'Save to:', save_to
        cv2.imwrite(save_to, image)
    root.destroy()
Project: ATX    Author: NetEaseGame    | Project source | File source
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w/2, h/2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100: # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
Project: ATX    Author: NetEaseGame    | Project source | File source
def main(serial=None, host=None, port=None):
    d = atx.connect(serial, host=host, port=port)
    while True:
        pilimg = d.screenshot()
        cv2img = imutils.from_pillow(pilimg)
        # cv2img = cv2.imread('tmp.png')
        # cv2.imwrite('tmp.png', cv2img)
        cv2img = cv2.resize(cv2img, fx=0.5, fy=0.5, dsize=(0, 0))
        pt = choose_point(cv2img)
        print 'click:', pt
        if pt:
            x, y = pt
            d.click(2*x, 2*y)
        cv2.waitKey(100)
        # import time
        # time.sleep(0.1)
Project: rekognition-video-utils    Author: awslabs    | Project source | File source
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        if count % inc == 0:
            success, image = vidcap.read()
            if success:
                cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                if fmt == 'PIL':
                    im = Image.fromarray(cv2_im)
                #elif fmt == 'DISK':
                    #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
                else:
                    im = cv2_im
                yield count, im 
            else:
                break
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2]
Project: cervix-roi-segmentation-by-unet    Author: scottykwok    | Project source | File source
def resize_addset(source_folder, target_folder, dsize, pattern=FILE_PATTERN):
    print('Resizing additional set...')
    if not os.path.exists(target_folder): os.makedirs(target_folder)
    for clazz in ClassNames:
        if clazz not in os.listdir(target_folder):
            os.makedirs(os.path.join(target_folder, clazz))

        total_images = glob.glob(os.path.join(source_folder, clazz, pattern))
        total = len(total_images)
        for i, source in enumerate(total_images):
            filename = ntpath.basename(source)
            target = os.path.join(target_folder, clazz, filename.replace('.jpg', '.png'))

            try:
                img = cv2.imread(source)
                img_resized = cv2.resize(img, dsize, interpolation=cv2.INTER_CUBIC)
                cv2.imwrite(target, img_resized)
            except:
                print('-------------------> error in: {}'.format(source))

            if i % 20 == 0:
                print("Resized {}/{} images".format(i, total))
Project: Stereo-Pose-Machines    Author: ppwwyyxx    | Project source | File source
def dump_2dcoor():
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()
    cnt = 0
    while True:
        cnt += 1
        m1 = camera.get_for_py(0)
        m1 = np.array(m1, copy=False)
        m2 = camera.get_for_py(1)
        m2 = np.array(m2, copy=False)

        o1, o2 = runner(m1, m2)
        pts = []
        for k in range(14):
            pts.append((argmax_2d(o1[:,:,k]),
                argmax_2d(o2[:,:,k])))
        pts = np.asarray(pts)
        np.save('pts{}.npy'.format(cnt), pts)
        cv2.imwrite("frame{}.png".format(cnt), m1);
        if cnt == 10:
            break
Project: chainer-faster-rcnn    Author: mitmul    | Project source | File source
def test_generate_proposals(self):
        self.assertEqual(self.total_anchors, len(self.shifts) *
                         self.anchor_target_layer.anchors.shape[0])

        min_x = self.all_anchors[:, 0].min()
        min_y = self.all_anchors[:, 1].min()
        max_x = self.all_anchors[:, 2].max()
        max_y = self.all_anchors[:, 3].max()
        canvas = np.zeros(
            (int(abs(min_y) + max_y) + 1,
             int(abs(min_x) + max_x) + 1), dtype=np.uint8)
        self.all_anchors[:, 0] -= min_x
        self.all_anchors[:, 1] -= min_y
        self.all_anchors[:, 2] -= min_x
        self.all_anchors[:, 3] -= min_y
        for anchor in self.all_anchors:
            anchor = list(six.moves.map(int, anchor))
            cv.rectangle(
                canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
        cv.imwrite('tests/all_anchors.png', canvas)
Project: chainer-faster-rcnn    Author: mitmul    | Project source | File source
def test_keep_inside(self):
        inds_inside, anchors = self.inds_inside, self.anchors

        min_x = anchors[:, 0].min()
        min_y = anchors[:, 1].min()
        max_x = anchors[:, 2].max()
        max_y = anchors[:, 3].max()
        canvas = np.zeros(
            (int(max_y - min_y) + 1,
             int(max_x - min_x) + 1), dtype=np.uint8)
        anchors[:, 0] -= min_x
        anchors[:, 1] -= min_y
        anchors[:, 2] -= min_x
        anchors[:, 3] -= min_y
        for i, anchor in enumerate(anchors):
            anchor = list(six.moves.map(int, anchor))
            _canvas = np.zeros(
                (int(max_y - min_y) + 1,
                 int(max_x - min_x) + 1), dtype=np.uint8)
            cv.rectangle(
                _canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
            cv.rectangle(
                canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
            cv.imwrite('tests/anchors_inside_{}.png'.format(i), _canvas)
        cv.imwrite('tests/anchors_inside.png', canvas)
Project: Yugioh-bot    Author: will7200    | Project source | File source
def img_to_string(img, char_set=None):
        cv2.imwrite("tmp\\ocr.png", img)
        command = "bin\\tess\\tesseract.exe --tessdata-dir bin\\tess\\tessdata tmp\\ocr.png tmp\\ocr "
        if char_set is not None:
            command += "-c tessedit_char_whitelist=" + char_set + " "
        command += "-psm 7 "
        command += "> nul 2>&1"
        CREATE_NO_WINDOW = 0x08000000
        subprocess.call(command, shell=True, creationflags=CREATE_NO_WINDOW)
        # Get the largest line in txt
        with open("tmp\\ocr.txt") as f:
            content = f.read().splitlines()
        output_line = ""
        for line in content:
            line = line.strip()
            if len(line) > len(output_line):
                output_line = line
        return output_line
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def interactive_save(image):
    img_str = cv2.imencode('.png', image)[1].tostring()
    imgpil = Image.open(StringIO(img_str))

    root = Tkinter.Tk()
    root.geometry('{}x{}'.format(400, 400))
    imgtk = ImageTk.PhotoImage(image=imgpil)
    panel = Tkinter.Label(root, image=imgtk) #.pack()
    panel.pack(side="bottom", fill="both", expand="yes")
    Tkinter.Button(root, text="Hello!").pack()
    save_to = tkSimpleDialog.askstring("Save cropped image", "Enter filename")
    if save_to:
        if save_to.find('.') == -1:
            save_to += '.png'
        print 'Save to:', save_to
        cv2.imwrite(save_to, image)
    root.destroy()
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print res
    print res.shape
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print cv2.minMaxLoc(res)
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | Project source | File source
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
    """Helper function to visualize and save any image"""
    image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
    image = image.astype(np.uint8)

    if resize: 
        image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))

    cv2.imshow(name, image)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()

    if save_image:
        assert path is not None
        cv2.imwrite(path, image)
Project: vehicle_brand_classification_CNN    Author: nanoc812    | Project source | File source
def imgPreprocess(img_dir, foldername):
    myfiles = glob.glob(img_dir+'*.jpg')

    temp = img_dir.split('/')
    newDir = '/'.join(temp[:(len(temp)-2)])
    if not os.path.exists(newDir+'/'+foldername+'/'):
        os.mkdir(newDir+'/'+foldername+'/')

    for filepath in myfiles:
        img = cv2.imread(filepath)
        logo = imgSeg(img)
        sd = filepath.rfind('/'); ed = filepath.find('.'); filename = filepath[int(sd+1):int(ed)]
        cv2.imwrite(newDir+'/'+foldername+'/'+filename+'.jpg',logo)
        print("car logo segmentation success,%s"%filename)