Python imutils module: resize() example source code

We extracted the following 30 code examples from open-source Python projects to demonstrate how to use imutils.resize().
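
For orientation, here is a minimal, hedged sketch of the basic call, assuming only that cv2 and imutils are installed (the file name example.jpg is a placeholder). imutils.resize preserves the aspect ratio: pass just a target width (or just a height) and the other dimension is computed for you.

import cv2
import imutils

# Load an image (example.jpg is a placeholder path)
image = cv2.imread("example.jpg")

# Passing only width makes imutils.resize compute the height that
# preserves the original aspect ratio
resized = imutils.resize(image, width=500)

print(image.shape, "->", resized.shape)
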

Project: SBB4-damage-tracker    Author: whorn
def createTrainingData(filename,time_start,time_stop):
    vidcap = cv2.VideoCapture(filename)
    try:
        os.makedirs("trainingdata_"+filename)
    except OSError:
        pass
    os.chdir("trainingdata_"+filename)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vidcap.get(cv2.CAP_PROP_FPS))
    for time in range(time_start,time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC,time*1000)
        success,image = vidcap.read()
        image = cv2.medianBlur(image,7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430,220:300]
        p2 = resized[370:430,520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        cv2.imwrite('p1_'+str(time)+".png",p1)
        cv2.imwrite('p2_'+str(time)+".png",p2)
    os.chdir("..")
Project: Pedestrian_Detector    Author: alexander-hamme
def find_people(self, img):
        '''
        Detect people in image
        :param img: numpy.ndarray
        :return: count of rectangles after non-maxima suppression, corresponding to number of people detected in picture
        '''
        t = time.time()
        # HOG descriptor/person detector
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        # Resize to whichever width is smaller: self.MIN_IMAGE_WIDTH or the image's own width
        image = imutils.resize(img, width=min(self.MIN_IMAGE_WIDTH, img.shape[1]))
        # detect people in the image
        (rects, wghts) = hog.detectMultiScale(image, winStride=self.WIN_STRIDE,
                                              padding=self.PADDING, scale=self.SCALE)
        # apply non-maxima suppression to the bounding boxes, using a fairly
        # large overlap threshold to try to keep overlapping boxes that belong to separate people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=self.OVERLAP_THRESHOLD)

        print("Elapsed time: {} seconds".format(int((time.time() - t) * 100) / 100.0))

        if self.SHOW_IMAGES:
            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                # Tighten the rectangle around each person by a small margin
                shrinkW, shrinkH = int(0.05 * xB), int(0.15*yB)
                cv2.rectangle(image, (xA+shrinkW, yA+shrinkH), (xB-shrinkW, yB-shrinkH), self.BOX_COLOR, 2)

            cv2.imshow("People detection", image)
            cv2.waitKey(self.IMAGE_WAIT_TIME)
            cv2.destroyAllWindows()

        return len(pick)
Project: apparent-age-gender-classification    Author: danielyou0230
def debug_face_classifier(file):
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    image = cv2.imread(file)

    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.07, 3)
    print faces
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
        #roi_gray = gray[y:y+h, x:x+w]
        #roi_color = image[y:y+h, x:x+w]

    cv2.imshow('Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: FacePoseEstimation    Author: abhisharma7
def image(self):

        img = cv2.imread(self.image_path)
        img = imutils.resize(img,width=min(800,img.shape[1]))
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray,(21,21),0)
        fullbody = self.HogDescriptor(gray)
        for (x,y,w,h) in fullbody:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)

        faces = self.haar_facedetection(gray)
        for (x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = self.haar_eyedetection(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0),2) 
            smile = self.haar_smilecascade(roi_gray)
            for (sx,sy,sw,sh) in smile:
                cv2.rectangle(roi_color, (sx,sy), (sx+sw,sy+sh),(0,255,0),2)
        img = self.dlib_function(img)
        cv2.imshow('img',img)
        cv2.waitKey(0) 
        cv2.destroyAllWindows()
Project: Pedestrian_Detector    Author: alexander-hamme
def find_people(self, img):
        '''
        Detect people in image
        :param img: numpy.ndarray
        :return: count of rectangles after non-maxima suppression, corresponding to number of people detected in picture
        '''
        t = time.time()
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        # Resize to whichever width is smaller: self.MIN_IMAGE_WIDTH or the image's own width
        image = imutils.resize(img, width=min(self.MIN_IMAGE_WIDTH, img.shape[1]))
        # detect people in the image
        (rects, wghts) = hog.detectMultiScale(image, winStride=self.WIN_STRIDE,
                                              padding=self.PADDING, scale=self.SCALE)
        # apply non-maxima suppression to the bounding boxes, using a fairly
        # large overlap threshold to try to keep overlapping boxes that belong to separate people
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=self.OVERLAP_THRESHOLD)

        print("Elapsed time of detection: {} seconds".format(int((time.time() - t) * 100) / 100.0))

        if self.SHOW_IMAGES:
            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                # Tighten the rectangle around each person by a small margin
                cv2.rectangle(image, (xA+5, yA+5), (xB-5, yB-10), self.BOX_COLOR, 2)

            cv2.imshow("People detection", image)
            cv2.waitKey(self.IMAGE_WAIT_TIME)
            cv2.destroyAllWindows()

        return len(pick)
Project: overwatch-counter-picker    Author: cheshire137
def __init__(self, original, is_cards_screen=False):
    self.original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    (self.original_h, self.original_w) = self.original.shape[:2]
    self.threshold = 0.8
    self.is_cards_screen = False
    self.resized_w = self.original_w
    self.resized_h = self.original_h

    if self.original_w != TARGET_WIDTH:
      self.original = imutils.resize(self.original, width=TARGET_WIDTH)
      (self.resized_h, self.resized_w) = self.original.shape[:2]

    self.mid_height = int(self.resized_h / 2.0)

    # Now we can detect whether we're on the game-over screen with voting cards, since
    # we've scaled the image to the same size from which the 'rate match'
    # template was taken.
    self.is_cards_screen = self.detect_if_cards_screen()

  # Returns a unique list of tuples with x,y coordinates for the top left of
  # where the given template appears in the original image. Returns None if the
  # template was not detected.
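
  # A hedged sketch of what that helper might look like, built on
  # cv2.matchTemplate. The method name template_positions, the exact use of
  # self.threshold, and the assumed cv2/numpy imports are illustrative
  # guesses, not the project's actual code.
  def template_positions(self, template):
    result = cv2.matchTemplate(self.original, template, cv2.TM_CCOEFF_NORMED)
    # keep every location whose normalized match score clears the threshold
    (ys, xs) = np.where(result >= self.threshold)
    positions = list(set(zip(xs.tolist(), ys.tolist())))
    return positions if positions else None
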
Project: piwall-cvtools    Author: infinnovation
def addFrame(self, frame, width=300):
        frame = imutils.resize(frame, width)

        # check if the writer is None
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w * 2, self.h * 2), True)
            self.zeros = np.zeros((self.h, self.w), dtype="uint8")

        # break the image into its RGB components, then construct the
        # RGB representation of each frame individually
        (B, G, R) = cv2.split(frame)
        R = cv2.merge([self.zeros, self.zeros, R])
        G = cv2.merge([self.zeros, G, self.zeros])
        B = cv2.merge([B, self.zeros, self.zeros])

        # construct the final output frame, storing the original frame
        # at the top-left, the red channel in the top-right, the green
        # channel in the bottom-right, and the blue channel in the
        # bottom-left
        output = np.zeros((self.h * 2, self.w * 2, 3), dtype="uint8")
        output[0:self.h, 0:self.w] = frame
        output[0:self.h, self.w:self.w * 2] = R
        output[self.h:self.h * 2, self.w:self.w * 2] = G
        output[self.h:self.h * 2, 0:self.w] = B

        # write the output frame to file
        self.writer.write(output)
Project: piwall-cvtools    Author: infinnovation
def addFrame(self, frame, width=600):
        frame = imutils.resize(frame, width)

        # check if the writer is None
        if self.writer is None:
            # store the image dimensions and initialize the video writer
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w, self.h), True)
        # write the output frame to file
        self.writer.write(frame)
Project: apparent-age-gender-classification    Author: danielyou0230
def debug_face_landmark(file, output=False, output_name='output'):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dat_face_landmark)

    image = cv2.imread(file)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape

    faces = detector(gray, 1)
    for (i, itr_face) in enumerate(faces):
        shape = predictor(gray, itr_face)
        shape = shape_to_np(shape)
        # convert dlib's rectangle to a OpenCV-style bounding box
        # [i.e., (x, y, w, h)], then draw the face bounding box
        (x, y, w, h) = rect_to_bb(itr_face, img_size, file)
        #print "landmark: ({:d}, {:d}) ({:d}, {:d})".format(x, y, w, h)

        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # show the face number
        cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape:
            cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

    # show the output image with the face detections + facial landmarks
    cv2.imshow(file, image)
    cv2.waitKey(0)
    if output:
        cv2.imwrite("../" + str(output_name + 1) + '.jpg', image)
    cv2.destroyAllWindows()
Project: apparent-age-gender-classification    Author: danielyou0230
def debug_Data_Augmentation(blur=False, sigma=1.0, hflip=False, vflip=False, hvsplit=False, randbright=False):
    image = cv2.imread('Dataset/young/female/180.jpg', 0)
    #image = cv2.imread('Dataset/young/female/285.jpg', 0)
    #image = cv2.resize(image, (100, 100))
    cv2.imshow('Image', image)

    # Data Augmentation:
    # Gaussian Blurred 
    if blur:
        cv2.imshow('Blur', gaussian_filter(input=image, sigma=sigma))
        #cv2.imwrite("Blur_{:1.1f}.jpg".format(sigma), 
        #           gaussian_filter(input=image, sigma=sigma))
        cv2.imwrite("../xBlur_{:1.1f}.jpg".format(sigma), 
                    gaussian_filter(input=image, sigma=sigma))
    # Flip and Rotate
    if (hflip and not vflip) or (hflip and hvsplit):
        cv2.imshow('hflip', np.fliplr(image))
        cv2.imwrite("../hflip.jpg", np.fliplr(image))
    if (vflip and not hflip) or (vflip and hvsplit):
        cv2.imshow('vflip', np.flipud(image))
        cv2.imwrite("../vflip.jpg", np.flipud(image))
    if hflip and vflip and not hvsplit:
        cv2.imshow('rot 180', np.rot90(image, k=2))
        cv2.imwrite("../rot2k.jpg", np.rot90(image, k=2))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: FacePoseEstimation    Author: abhisharma7
def dlib_function(self,image):

        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(self.shape_predictor)
        image = imutils.resize(image, width=500)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(image, 1)

        for (i, rect) in enumerate(rects):
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            for (x, y) in shape:
                cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
        return image
Project: OpenCV2    Author: SarathM1
def lipSegment(img):
    img = imutils.resize(img,width=300)
    img_copy = img.copy()

    landmarks = dlib_obj.get_landmarks(img)
    dlib_obj.get_face_mask(img_copy, landmarks)

    output_img = img-img_copy
    output_img = cv2.cvtColor(output_img,cv2.COLOR_BGR2GRAY)

    contours,hierarchy = cv2.findContours(output_img.copy(), cv2.cv.CV_RETR_EXTERNAL, cv2.cv.CV_CHAIN_APPROX_SIMPLE)  # cv2.findContours(image, mode, method)
    cv2.drawContours(img, contours, -1, (0,255,0), 2,maxLevel=0)

    cnt = contours[0]
    (x,y),(MA,ma),angle = cv2.fitEllipse(cnt)


    a = ma/2
    b = MA/2


    eccentricity = sqrt(pow(a,2)-pow(b,2))
    eccentricity = round(eccentricity/a,2)

    font = cv2.FONT_HERSHEY_SIMPLEX

    cv2.putText(img,'Eccentr= '+str(round(eccentricity,3)),(10,350), font, 1,(255,0,0),2,16)

    if(eccentricity < 0.9):
        cv2.putText(img,'Commands = O',(10,300), font, 1,(0,0,255),2,16)
    else:
        cv2.putText(img,'Commands = E',(10,300), font, 1,(0,0,255),2,16)

    return img
Project: drowsy_detection    Author: thandongtb
def predictFacialLandmark(img, detector):
    img = imutils.resize(img, width=500)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image
    rects = detector(gray, 1)
    return rects
Project: drowsy_detection    Author: thandongtb
def readImage(img_path):
    image = cv2.imread(img_path)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return image, gray
Project: drowsy_detection    Author: thandongtb
def drawEyes(eye, image):
    (x, y, w, h) = cv2.boundingRect(np.array([eye]))
    h = w
    y = y - h // 2
    roi = image[y:y + h, x:x + w]
    roi = imutils.resize(roi, width=24, inter=cv2.INTER_CUBIC)

    return roi
Project: drowsy_detection    Author: thandongtb
def predictFacialLandmark(img, detector):
    img = imutils.resize(img, width=500)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image
    rects = detector(gray, 1)
    return rects
Project: drowsy_detection    Author: thandongtb
def readImage(img_path):
    image = cv2.imread(img_path)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return image, gray
Project: frc-livescore    Author: andrewda
def getScoreboard(self, img):
        template_width = self.TEMPLATE_SCOREBOARD.shape[1]
        img_width = img.shape[1]
        template = imutils.resize(self.TEMPLATE_SCOREBOARD,
                                  width=int(template_width/1280.0*img_width))
        top_left, bottom_right = self.matchTemplate(img, template)

        return img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
Project: frc-livescore    Author: andrewda
def getTopBar(self, img):
        template_width = self.TEMPLATE_TOP.shape[1]
        img_width = img.shape[1]
        template = imutils.resize(self.TEMPLATE_TOP,
                                  width=int(template_width/1280.0*img_width))
        top_left, bottom_right = self.matchTemplate(img, template)

        located = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
        h, w = located.shape[:2]

        return located[:, int(w*0.125):int(w*0.5)]
Project: frc-livescore    Author: andrewda
def getTimeArea(self, img):
        template_width = self.TEMPLATE_TIME.shape[1]
        img_width = img.shape[1]
        template = imutils.resize(self.TEMPLATE_TIME,
                                  width=int(template_width/1280.0*img_width))
        top_left, bottom_right = self.matchTemplate(img, template)

        located = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
        h, w = located.shape[:2]

        return located[int(h*0.16):int(h*0.84), int(w*0.42):int(w*0.58)]
Project: frc-livescore    Author: andrewda
def getScoreArea(self, img):
        template_width = self.TEMPLATE_SCORES.shape[1]
        img_width = img.shape[1]
        template = imutils.resize(self.TEMPLATE_SCORES,
                                  width=int(template_width/1280.0*img_width))
        top_left, bottom_right = self.matchTemplate(img, template)

        return img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
Project: SBB4-damage-tracker    Author: whorn
def videoToImageArray(filename,time_start,time_stop):
    vidcap = cv2.VideoCapture(filename)
    pictures = [[],[]]
    for time in range(time_start,time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC,time*1000)      # cue to time seconds into the video
        success,image = vidcap.read()
        image = cv2.medianBlur(image,7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430,220:300]
        p2 = resized[370:430,520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        pictures[0].append(p1)
        pictures[1].append(p2)
    return pictures
Project: 2016-Vision    Author: Team4761
def capture_images():
    """
    print "Staring capture thread"
    global frame
    while True:
        print "Attempting capture"
        (grabbed, f) = stream.read()
        if grabbed:
            print "Captured"
            lock.acquire()
            frame = imutils.resize(f, width=resolution[0], height=resolution[1])
            lock.release()
    """
    print "started capturing thread"
    global frame
    with picamera.PiCamera() as camera:
        camera.resolution = resolution
        camera.shutter_speed = 250
        time.sleep(0.5) # Shutter speed is not set instantly. This wait allows time for changes to take effect.
        print "Initialized camera..."
        with picamera.array.PiRGBArray(camera) as stream:
            for foo in camera.capture_continuous(stream, format="bgr", use_video_port=True):
                print "Captured an image"
                stream.seek(0)
                stream.truncate()
                lock.acquire()
                frame = stream.array
                lock.release()
                print "Converted image data to array"
Project: digital-display-character-rec    Author: upupnaway
def cnvt_edged_image(img_arr, should_save=False):
  # ratio = img_arr.shape[0] / 300.0
  image = imutils.resize(img_arr,height=300)
  gray_image = cv2.bilateralFilter(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY),11, 17, 17)
  edged_image = cv2.Canny(gray_image, 30, 200)

  if should_save:
    cv2.imwrite('cntr_ocr.jpg', edged_image)

  return edged_image
Project: CS231A_Project    Author: afazel
def createImagePyramid(im, scale, minHeight, minWidth):
    yield im
    while (True):
        reducedWidth = int(round(im.shape[1] / scale))
        reducedHeight = int(round(im.shape[0] / scale))
        im = imutils.resize(im, width=reducedWidth, height=reducedHeight)
        if ((im.shape[0] >= minHeight) and (im.shape[1] >= minWidth)):
            yield im
        else:
            break
Project: overwatch-counter-picker    Author: cheshire137
def scale_template_for_cards_screen(self, template):
    (height, width) = template.shape[:2]
    new_width = int(math.ceil(width * 0.78))
    return imutils.resize(template, width=new_width)

  # Returns true if the given y-axis position represents a hero on the red team.
Project: apparent-age-gender-classification    Author: danielyou0230
def face_extraction(path):
    path_str = path[:-1] if path.endswith('/') else path
    output_dir = path_str + '_faces'
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dat_face_landmark)
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    undetectLst = list()

    numfile = get_dataInfo(path_str)
    not_detected = 0
    itr = 0

    for itr_file in os.listdir(path_str):
        if itr_file.endswith('.jpg'):
            file = "{:s}/{:s}".format(path_str, itr_file)
            image = cv2.imread(file)
            image = imutils.resize(image, width=500)
            bFace, faces = facial_landmark_detection(image, detector, predictor, file)

            if not bFace:
                bFace, faces = face_detect_classifier(image, face_cascade)
                if not bFace:
                    print file
                    undetectLst.append(file)
                    not_detected += 1
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    cv2.imwrite("{:s}/{:s}".format(output_dir, itr_file), image)
                    continue
            x, y, w, h = faces
            crop_img = image[y:y + h, x:x + w]
            crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
            cv2.imwrite("{:s}/{:s}".format(output_dir, itr_file), crop_img)
            itr += 1 
        else:
            continue
    total = itr + not_detected
    print "{:s}: {:4d} of {:4d} file missed detected, detect rate {:2.2f}%"\
    .format(path_str, not_detected, total, 100.0 * itr / total)
    return undetectLst, total
Project: face-and-Pedestrian-detection-    Author: xiaoerlaigeid
def detect():
    move=0
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    cap=cv2.VideoCapture(0)
    while(1):
        ret, img=cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(img, width=min(400, img.shape[1]))
        (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),padding=(8, 8), scale=1.05)
        for (x, y, w, h) in rects:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
            for (xA, yA, xB, yB) in pick:
                cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
            if (xA/480)>0.5 :
                print("move to right")
                move=4

            elif (yA/640)>0.5:
                print('move to down')
                move=3
            elif (xB/480)<0.3:
                print('move to left')
                move=2
            elif (yB/640)<0.3:
                print('move to up')
                move=1
            else:
                print('do nothing')
                move=0
            mqt.pass_message(move)
            #eyes = eye_cascade.detectMultiScale(roi_gray)

            #for (ex,ey,ew,eh) in eyes:
            #   cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        cv2.imshow('img',image)
        k=cv2.waitKey(1)& 0xff
        if k==27:
            break
        elif (k==ord('w')):
            mqt.pass_message(1)
        elif (k==ord('s')):
            mqt.pass_message(3)

    cap.release()
    cv2.destroyAllWindows()
Project: OpenCV2    Author: SarathM1
def main():
    print "\t\t########################################"
    print "\t\tOPTIMISED = ",cv2.useOptimized()," !!!!"
    print "\t\t########################################"

    while True:
        ret,img=cap.read()
        #img = cv2.medianBlur(img,3)    # 3 is a fairly small kernel size
        img = cv2.resize(img,None,fx=1.3,fy=1,interpolation = cv2.INTER_LINEAR)

        hand_box = [(0,50),(400,400)]
        head_box = [(500,50),(800,400)]
        cv2.rectangle(img,hand_box[0],hand_box[1],(255,255,255),2)
        cv2.rectangle(img,head_box[0],head_box[1],(50,50,50),2)

        head_frame = img[50:400,500:800]
        try:
            img[50:400,500:800] = lipSegment(head_frame)    
        except ValueError, e:
            #print e
            pass

        hand_frame = img[50:400,0:400]

        try:
            mask,counter,hull,(cx,cy),list_far,list_end = count_fingers(hand_frame)

            if(cv2.contourArea(hull)>3000) and list_far:
                cv2.drawContours(hand_frame,[hull],0,(0,255,0),1)
                [cv2.circle(hand_frame,far,5,[0,0,0],-1) for far in list_far]
                [cv2.circle(hand_frame,end,5,[150,150,150],-1) for end in list_end]
                cv2.putText(hand_frame,"Fingers = "+str(counter+1),(10,250),cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,1)

        except ZeroDivisionError, e:
            print "Count_fingers ZeroDivisionError: ",e
        except UnboundLocalError,e:
            print "Count_fingers UnboundLocalError: ",e



        cv2.imshow('Img',img)

        if cv2.waitKey(20)&0xff==ord('q'):
            cv2.imwrite('output.jpg',img)
            cv2.imwrite('Mask.jpg',mask)
            cv2.imwrite('hand_frame.jpg',hand_frame)
            break

    cap.release()
    cv2.destroyAllWindows()
Project: keras-autoencoder    Author: Rentier
def create_dataset(path, dest, dist, transpose, rgb, resize_factor):
    cap = cv2.VideoCapture(path)
    length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    width  = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))

    fac = 3 if rgb else 1

    rwidth = int(width * resize_factor * fac)
    rheight = int(height * resize_factor * fac)

    frames = 0
    I = np.zeros([rwidth*rheight * fac, length])
    Phi = np.zeros([3, length])

    while True:
        (grabbed, frame) = cap.read()

        if not grabbed: break       

        p, r = detect_ball(frame)

        if p is not None:
            Phi[:,frames] = dist(p, r)[0:3]
        else:
            Phi[:,frames] = np.nan

        img = frame

        if not rgb:
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        img = imutils.resize(img, width=rwidth, height=rheight, inter=cv2.INTER_AREA)

        I[:,frames] = img.T.ravel() if transpose else img.ravel()

        frames += 1

    print("Got {0} frames".format(frames))

    cap.release()
    cv2.destroyAllWindows()

    data = {'I' : I, 'h' : rheight, 'w' : rwidth, 'Phi' : Phi}
    scipy.io.savemat(dest, data , do_compression=True)