Python cv2 module: minMaxLoc() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.minMaxLoc().
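For orientation: cv2.minMaxLoc() takes a single-channel array and returns a 4-tuple (minVal, maxVal, minLoc, maxLoc), where the two locations are (x, y) points. A minimal sketch with an arbitrary array:

import cv2
import numpy as np

arr = np.array([[1, 2], [3, 4]], dtype=np.float32)  # must be single-channel
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(arr)
print(min_val, max_val)  # 1.0 4.0
print(min_loc, max_loc)  # (0, 0) (1, 1) -- points are (x, y), i.e. (column, row)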

Project: AutomatorX    Author: xiaoyaojjian    | project source | file source
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print('is_match', total, num)
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using matchTemplate: img1 and img2 have identical shapes, so the
    ## result map is a single 1x1 value -- the match confidence itself
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print(confidence)
    return confidence
Project: ATX    Author: NetEaseGame    | project source | file source
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)  # `method` is a module-level match-method constant
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
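The branch above on min_loc versus max_loc recurs throughout these examples: for cv2.TM_SQDIFF and cv2.TM_SQDIFF_NORMED the best match is the minimum of the result map, for every other method it is the maximum. A small helper capturing the rule (the name best_match_location is illustrative, not from the project):

def best_match_location(result, method):
    # Square-difference methods score the best match lowest;
    # correlation-based methods score it highest.
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED):
        return min_loc, min_val
    return max_loc, max_val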
Project: meleedb-segment    Author: sashahashi    | project source | file source
def multiple_template_match(self, feature, scene, roi=None, scale=None,
                            min_scale=0.5, max_scale=1.0, max_distance=14,
                            min_corr=0.8, debug=False,
                            threshold_min=50, threshold_max=200):
        if roi is not None:
            scene = scene[roi.top:(roi.top + roi.height), roi.left:(roi.left + roi.width)]

        if not scale:
            scale = self.find_best_scale(feature, scene, min_scale=min_scale, max_scale=max_scale, min_corr=min_corr)
        peaks = []

        if scale:
            scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

            canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
            canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)

            # Threshold for peaks.
            corr_map = cv2.matchTemplate(canny_scene, canny_feature, cv2.TM_CCOEFF_NORMED)
            _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)

            good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
            if debug:
                print(max_corr, good_points)
            clusters = self.get_clusters(good_points, max_distance=max_distance)
            peaks = [max([(pt, corr_map[pt]) for pt in cluster], key=lambda pt: pt[1]) for cluster in clusters]

        return (scale, peaks)
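The tolerance trick above keeps every location whose correlation comes within self.tolerance of the global maximum, pairing cv2.minMaxLoc with np.where. In isolation, assuming a result map res and an illustrative tolerance value:

_, max_corr, _, _ = cv2.minMaxLoc(res)
tolerance = 0.05  # illustrative value
points = list(zip(*np.where(res >= max_corr - tolerance)))  # (row, col) pairs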
Project: camera_calibration_frontend    Author: groundmelon    | project source | file source
def mkgray(self, msg):
        """
        Convert a message into an 8-bit, 1-channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)  # normalized to the np alias used later in this function
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
Project: handfontgen    Author: nixeneko    | project source | file source
def detectmarker(image):
    grayscale = getgrayimage(image)
    mkradius = getapproxmarkerradius(grayscale) # approximate marker radius
    marker = cv2.resize(MARKER, (mkradius*2, mkradius*2)) # resize the marker

    #template matching
    matched = cv2.matchTemplate(grayscale, marker, cv2.TM_CCORR_NORMED) #returns float32

    #detect the 4 greatest peaks
    markerposarray = []
    for i in range(4):
        (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
        markerposarray.append(tuple(map(lambda x: x+mkradius, maxloc)))
        cv2.circle(matched, maxloc, mkradius, (0.0), -1) #suppress the area around the current maxloc so the next pass finds a different peak

    return markerposarray
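Painting over each maximum before the next cv2.minMaxLoc call, as above, is the usual way to extract the top-k peaks from one result map. A standalone sketch under that assumption (function name illustrative):

def top_k_matches(result, k, suppress_radius):
    # Repeatedly take the best remaining peak, then erase a disc around it
    # so the next cv2.minMaxLoc call cannot return the same neighborhood.
    result = result.copy()
    floor = float(result.min())  # a value no real peak can beat
    peaks = []
    for _ in range(k):
        _, max_val, _, max_loc = cv2.minMaxLoc(result)
        peaks.append((max_loc, max_val))
        cv2.circle(result, max_loc, suppress_radius, floor, -1)
    return peaks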
Project: ATX    Author: NetEaseGame    | project source | file source
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print('is_match', total, num)
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using matchTemplate: img1 and img2 have identical shapes, so the
    ## result map is a single 1x1 value -- the match confidence itself
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print(confidence)
    return confidence
Project: AutomatorX    Author: xiaoyaojjian    | project source | file source
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)  # `method` is a module-level match-method constant
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
Project: meleedb-segment    Author: sashahashi    | project source | file source
def find_best_scale(self, feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.03, min_corr=0.8):
        best_corr = 0
        best_scale = 0

        for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
            scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

            result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
            _, max_val, _, _ = cv2.minMaxLoc(result)

            if max_val > best_corr:
                best_corr = max_val
                best_scale = scale

        if best_corr > min_corr:
            return best_scale
        else:
            return None
Project: meleedb-segment    Author: sashahashi    | project source | file source
def find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.02, min_corr=0.8):
    best_corr = 0
    best_scale = 0

    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(result)

        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale

    if best_corr > min_corr:
        return best_scale
    else:
        return None
Project: object-detector    Author: penny4860    | project source | file source
def test_crop_random():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"dst_size" : (20, 20),
        "n_patches" : 5,
    }

    # When perform crop_random()
    patches = utils.crop_random(image, parameters["dst_size"], parameters["n_patches"])

    # Then every patch should be included in an image.
    match_cost = []
    for patch in patches:
        M = cv2.matchTemplate(image, patch, cv2.TM_SQDIFF)
        min_cost, _, _, _ = cv2.minMaxLoc(M)
        match_cost.append(min_cost)
    assert (np.array(match_cost) == 0).all(), "utils.crop_random() unit test failed!!"
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh    | project source | file source
def mkgray(self, msg):
        """
        Convert a message into an 8-bit, 1-channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh    | project source | file source
def mkgray(self, msg):
        """
        Convert a message into an 8-bit, 1-channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh    | project source | file source
def mkgray(self, msg):
        """
        Convert a message into an 8-bit, 1-channel monochrome OpenCV image
        """
        # as cv_bridge automatically scales, we need to remove that behavior
        # TODO: get a Python API in cv_bridge to check for the image depth.
        if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
            mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
            mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
            return mono8
        elif 'FC1' in msg.encoding:
            # floating point image handling
            img = self.br.imgmsg_to_cv2(msg, "passthrough")
            _, max_val, _, _ = cv2.minMaxLoc(img)
            if max_val > 0:
                scale = 255.0 / max_val
                mono_img = (img * scale).astype(np.uint8)
            else:
                mono_img = img.astype(np.uint8)
            return mono_img
        else:
            return self.br.imgmsg_to_cv2(msg, "mono8")
Project: endless-lake-player    Author: joeydong    | project source | file source
def match_template(screenshot, template):
    # Perform match template calculation
    matches = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)

    # Survey results
    (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(matches)

    # Load template size
    (template_height, template_width) = template.shape[:2]

    return {
        "x1": max_loc[0],
        "y1": max_loc[1],
        "x2": max_loc[0] + template_width,
        "y2": max_loc[1] + template_height,
        "center": {
            "x": max_loc[0] + (template_width / 2),
            "y": max_loc[1] + (template_height / 2)
        },
        "score": max_val
    }
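A hypothetical call, assuming both images are numpy arrays loaded beforehand (paths and threshold are illustrative):

screenshot = cv2.imread('screen.png')        # hypothetical path
template = cv2.imread('button.png')          # hypothetical path
match = match_template(screenshot, template)
if match["score"] > 0.8:                     # illustrative threshold
    print("found at", match["center"])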
Project: flight-stone    Author: asmateus    | project source | file source
def findTarget(self):
        result = cv2.matchTemplate(self.current_frame, self.root_patch.patch, self.match_method)
        _, _, _, max_loc = cv2.minMaxLoc(result)

        # Select found target
        target_top_left = max_loc
        target_bottom_right = (
            target_top_left[0] + self.patch_w,
            target_top_left[1] + self.patch_h)

        # Update Patch with current info
        patch = self.root_patch.copy()
        patch.patch = self.current_frame[
            target_top_left[1]: target_bottom_right[1] + 1,
            target_top_left[0]: target_bottom_right[0] + 1, :]
        patch.p1 = Point(x=target_top_left, y=target_bottom_right)
        self.assignRootPatch(patch)

        self.tracker = KCFTracker(True, True, True)
        self.tracker.init(
            [target_top_left[0], target_top_left[1], self.patch_w, self.patch_h],
            self.current_frame)

        return (target_top_left, target_bottom_right)
Project: flight-stone    Author: asmateus    | project source | file source
def detect(self, z, x):
        k = self.gaussianCorrelation(x, z)
        res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))

        _, pv, _, pi = cv2.minMaxLoc(res)   # pv:float  pi:tuple of int
        p = [float(pi[0]), float(pi[1])]   # cv::Point2f, [x,y]  #[float,float]

        if(pi[0] > 0 and pi[0] < res.shape[1] - 1):
            p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
        if(pi[1] > 0 and pi[1] < res.shape[0] - 1):
            p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])

        p[0] -= res.shape[1] / 2.
        p[1] -= res.shape[0] / 2.

        return p, pv
Project: smart-cam    Author: smart-cam    | project source | file source
def __get_uniq_faces_curr_frame_template_match(self, frame_id, frame_prev, faces_roi):
        logger.info("[{0}] Face Similarity: # of faces in current frame - {1}".format(frame_id,
                                                                                len(faces_roi)))
        # First Time
        if frame_prev.size == 0:
            return len(faces_roi)

        uniq_faces_curr_frame = 0

        for template_roi in faces_roi:
            # Apply template Matching
            res = cv2.matchTemplate(frame_prev,
                                    template_roi,
                                    cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            logger.info("[{0}] {1},{2},{3},{4}".format(frame_id, min_val, max_val, min_loc, max_loc))
            # NOTE: the match statistics are only logged; uniq_faces_curr_frame
            # is never updated, so this method currently always returns 0 here

        logger.info("[{0}] Total Unique Faces in Current Frame: {1}".format(frame_id, uniq_faces_curr_frame))
        return uniq_faces_curr_frame
Project: cv-utils    Author: gmichaeljaison    | project source | file source
def match_one(template, image, options=None):
    """
    Match template and find exactly one match in the Image using specified features.

    :param template: Template Image
    :param image: Search Image
    :param options: Options include
        - features: List of options for each feature
    :return: (Box, Score) Bounding box of the matched object, Heatmap value
    """
    heatmap, scale = multi_feat_match(template, image, options)

    min_val, _, min_loc, _ = cv.minMaxLoc(heatmap)
    top_left = tuple(scale * x for x in min_loc)
    score = min_val

    h, w = template.shape[:2]
    return Box(top_left[0], top_left[1], w, h), score
Project: CameraTablet    Author: dmvlasenk    | project source | file source
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(meth)
    res = cv2.matchTemplate(img,img_template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
Project: CameraTablet    Author: dmvlasenk    | project source | file source
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(meth)
    res = cv2.matchTemplate(img,img_template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
Project: fatego-auto    Author: lishunan246    | project source | file source
def __init__(self):
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if self.have('topleft'):
            tl = self._imageLoader.get('topleft')
            res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)

            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x1, y1 = max_loc
            rd = self._imageLoader.get('rightdown')
            res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x2, y2 = max_loc
            # default 989
            GameStatus().y = y2 - y1
            GameStatus().use_Droid4X = True
Project: awesome-opencv-scripts    Author: hs105    | project source | file source
def find_template(template):
    method = 'cv2.TM_CCOEFF'
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(image, template, eval(method))
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return top_left, bottom_right, res
Project: key-face    Author: gabrielilharco    | project source | file source
def detectTemplateMatching(self, img):
        self.templateMatchingCurrentTime = cv2.getTickCount()
        duration = (self.templateMatchingCurrentTime - self.templateMatchingStartTime)/cv2.getTickFrequency()
        if duration > settings.templateMatchingDuration or self.trackedFaceTemplate[2] == 0 or self.trackedFaceTemplate[3] == 0:
            self.foundFace = False
            self.isTemplateMatchingRunning = False
            return

        faceTemplate = self.getSubRect(img, self.trackedFaceTemplate)
        roi = self.getSubRect(img, self.trackedFaceROI)
        match = cv2.matchTemplate(roi, faceTemplate, cv2.TM_SQDIFF_NORMED)
        cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)


        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
        foundTemplate = (
            minLoc[0] + self.trackedFaceROI[0],
            minLoc[1] + self.trackedFaceROI[1],
            self.trackedFaceTemplate[2],
            self.trackedFaceTemplate[3])

        self.trackedFaceTemplate = foundTemplate
        self.trackedFace = self.scaleRect(self.trackedFaceTemplate, img, 2)
        self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
Project: pytomatic    Author: N0K0    | project source | file source
def test_templating(self):

        bbox = self.wh.create_boundingbox()
        scaled_bbox = self.wh.bbox_scale(bbox,0.5)
        sub_image = self.px.grab_window(scaled_bbox)
        sub_image = self.px.img_to_numpy(sub_image)

        h, w = sub_image.shape[0:2]  # shape is (rows, cols) = (height, width)

        main_image = cv2.imread('pytomatic/tests/assets/calc_clean.PNG')
        res = cv2.matchTemplate(main_image, sub_image, cv2.TM_CCOEFF)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        cv2.rectangle(main_image, top_left, bottom_right, 255, 2)
        assert top_left == (89,89)
        assert bottom_right == (290,290)

        #plt.imshow(main_image)
        #plt.show()
Project: Speedy-TSLSR    Author: talhaHavadar    | project source | file source
def recognizeDigit(digit, method = REC_METHOD_TEMPLATE_MATCHING, threshold= 55):
    """
        Finds the best match for the given digit(RGB or gray color scheme). And returns the result and percentage as an integer.
        @threshold percentage of similarity
    """
    __readDigitTemplates()
    digit = digit.copy()
    if digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    if method == REC_METHOD_TEMPLATE_MATCHING:
        bestMatch = None
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()

            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))

            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)#cv2.TM_CCOEFF_NORMED)
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)

    if bestMatch is not None and (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)

    return (-1, 0)
Project: SelfDrivingCar    Author: aguijarro    | project source | file source
def find_matches(img, template_list):
    # Make a copy of the image to draw on
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through template list
    # Read in templates one by one
    # Use cv2.matchTemplate() to search the image
    #     using whichever of the OpenCV search methods you prefer
    # Use cv2.minMaxLoc() to extract the location of the best match
    # Determine bounding box corners for the match
    # Return the list of bounding boxes
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template Matching
        res = cv2.matchTemplate(img,tmp,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc

        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    return bbox_list
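A hypothetical invocation (paths illustrative), with img read the same way the templates are:

img = mpimg.imread('frames/frame0.jpg')      # hypothetical path
templates = ['templates/cutout1.jpg', 'templates/cutout2.jpg']
for top_left, bottom_right in find_matches(img, templates):
    cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 2)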
Project: Rapider    Author: yazdipour    | project source | file source
def ocr():
    img = numpy.array(ImageGrab.grab().convert('RGB'))[:, :, ::-1].copy()[y:y+h, x:x+w][:,:,2]
    # img = cv2.equalizeHist(img)
    index=0
    for tmp in templates:
        res = cv2.matchTemplate(img,tmp,cv2.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        ix, iy = max_loc[0] // pw, max_loc[1] // ph  # integer cell indices
        strx=txtbox[iy][ix].get()
        index=index+1
        txtbox[iy][ix].insert(len(strx),str(index))
    return
Project: handfontgen    Author: nixeneko    | project source | file source
def getcroppedarea(img, markersize):
    #use template matching to detect area to be cropped
    grayimg = getgrayimage(img)
    # detect top-left marker using template matching
    marker_tl = cv2.resize(MARKER_TL, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_tl, cv2.TM_CCORR_NORMED) #returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)

    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    pos_tl = (mkrect.x+mkrect.w, mkrect.y+mkrect.h)
    #pos_tl = (maxloc[0]+markersize, maxloc[1]+markersize)

    # detect bottom-right marker using template matching
    marker_br = cv2.resize(MARKER_BR, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_br, cv2.TM_CCORR_NORMED) #returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)

    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    pos_br = (mkrect.x, mkrect.y)
    #pos_br = maxloc

    #detect QR code
    qrarea = img[pos_br[1]:,:img.shape[0]-pos_br[1]]
    typ, val = passzbar.passzbar(qrarea)

    if not typ:
        return None, None
    strval = val.decode('ascii').strip()
    #print(strval)

    #cv2.circle(img, pos_tl, 5, (255, 0, 0), -1)
    #cv2.circle(img, pos_br, 5, (0, 255, 0), -1)
    #print(pos_tl, pos_br)
    #cv2.imshow("hoge", img)
    #cv2.imshow("hoge", img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]])
    # crop and return detected area
    return strval, img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]]
Project: ATX    Author: NetEaseGame    | project source | file source
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s: continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
Project: ATX    Author: NetEaseGame    | project source | file source
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
Project: Stereo-Pose-Machines    Author: ppwwyyxx    | project source | file source
def argmax_2d(heatmap):
    # minMaxLoc reports the max location as an (x, y) point;
    # reversing it gives (row, col) for numpy indexing
    _, _, _, top_left = cv2.minMaxLoc(heatmap)
    return top_left[::-1]
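The reversal matters because OpenCV reports locations as (x, y) points while numpy indexes arrays as [row, col]. A quick check with an arbitrary heatmap:

import cv2
import numpy as np

hm = np.zeros((4, 6), dtype=np.float32)
hm[1, 5] = 1.0                    # peak at row 1, column 5
_, _, _, max_loc = cv2.minMaxLoc(hm)
print(max_loc)                    # (5, 1) -- (x, y)
print(hm[max_loc[::-1]])          # 1.0, after reversing to (row, col)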
Project: AutomatorX    Author: xiaoyaojjian    | project source | file source
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s: continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
Project: AutomatorX    Author: xiaoyaojjian    | project source | file source
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
Project: cv-sample-python    Author: macaca-sample    | project source | file source
def match(self, templateimage, threshold=0.8):
        image = cv2.imread(self.sourceimage)
        template = cv2.imread(templateimage)
        result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        similarity = cv2.minMaxLoc(result)[1]  # max_val of the result map
        if similarity < threshold:
            return similarity
        else:
            # np.unravel_index yields the peak as (row, col),
            # unlike cv2.minMaxLoc, which reports (x, y)
            return np.unravel_index(result.argmax(), result.shape)
Project: py2DIC    Author: Geod-Geom    | project source | file source
def template_match(img_master, img_slave, method = 'cv2.TM_CCOEFF_NORMED', mlx = 1, mly = 1, show=True):    

    # Apply image oversampling 
    img_master = cv2.resize(img_master,None,fx=mlx, fy=mly, interpolation = cv2.INTER_CUBIC)
    img_slave  = cv2.resize(img_slave,None,fx=mlx, fy=mly, interpolation = cv2.INTER_CUBIC)

    res = cv2.matchTemplate(img_slave,img_master,eval(method))

    w, h = img_master.shape[::-1]    
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # Control if the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum value
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc 
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Retrieve center coordinates
    px = (top_left[0]+bottom_right[0])/(2.0*mlx)
    py = (top_left[1]+bottom_right[1])/(2.0*mly)

    # Scale images for visualization
    img_master_scaled = cv2.convertScaleAbs(img_master, alpha=(255.0/500))
    img_slave_scaled = cv2.convertScaleAbs(img_slave, alpha=(255.0/500))

    cv2.rectangle(img_slave_scaled,top_left, bottom_right, 255, 2*mlx) 

    if show:
        plt.figure(figsize=(20,10))
        plt.subplot(131),plt.imshow(res,cmap = 'gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(132),plt.imshow(img_master_scaled,cmap = 'gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.subplot(133),plt.imshow(img_slave_scaled, cmap = 'gray')
        plt.suptitle(method)
        plt.show()

    return px, py, max_val
Project: airport    Author: cfircohen    | project source | file source
def MatchTemplate(template, target):
  """Returns match score for given template"""
  res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
  min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
  return max_val
Project: pynephoscope    Author: neXyon    | project source | file source
def findImageStar(self, x, y):
        x -= self.min_max_range
        y -= self.min_max_range
        roi = self.image[y:y + 2 * self.min_max_range + 1, x:x + 2 * self.min_max_range + 1]
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        _, _, _, maxLoc = cv2.minMaxLoc(roi)
        x += maxLoc[0]
        y += maxLoc[1]

        return x, y
Project: libskeletal    Author: bobbybee    | project source | file source
def jointPos(vis, n):
    I = np.reshape(vis[:, n*2]*vis[:, n*2] + vis[:, n*2+1]*vis[:, n*2+1], (SIZE, SIZE))
    ksize = SIZE // 4 + 1  # GaussianBlur requires an integer (odd) kernel size
    return cv2.minMaxLoc(cv2.GaussianBlur(I, (ksize, ksize), 0))[2]
Project: frc-livescore    Author: andrewda    | project source | file source
def matchTemplate(self, img, template):
        res = cv2.matchTemplate(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY),
                                template,
                                cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        top_left = max_loc
        bottom_right = (top_left[0] + template.shape[1],
                        top_left[1] + template.shape[0])

        return top_left, bottom_right
Project: speed-camera    Author: pageauc    | project source | file source
def check_image_match(full_image, small_image):
    # Look for small_image in full_image and return the best match score
    # Try other MATCH_METHOD settings per config.py comments
    # For More Info See http://docs.opencv.org/3.1.0/d4/dc6/tutorial_py_template_matching.html
    result = cv2.matchTemplate(full_image, small_image, search_match_method)
    # Process result to return the probability and location of the best match
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)  # find search rect match in new image
    return maxVal

#-----------------------------------------------------------------------------------------------
Project: CameraTablet    Author: dmvlasenk    | project source | file source
def matchTemplate(self, img_full, img_template, aMeth):
        w, h = img_template.shape[::-1]
        img = img_full.copy()
        # Apply template Matching
        method = eval(aMeth)
        res = cv2.matchTemplate(img,img_template,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        return [top_left, bottom_right]
Project: fatego-auto    Author: lishunan246    | project source | file source
def click_on(self, name, repeat=False, loader=_imageLoader):
        if GameStatus().game_stage == GameStage.Stopped:
            return
        self.log('try click ' + name)
        p = loader.get(name)
        max_val = 0
        x, y = 0, 0
        while max_val < 0.8:
            if GameStatus().game_stage == GameStage.Stopped:
                return

            self.capture()
            res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            self.log(name + ' ' + str(max_val))
            x, y = max_loc
            time.sleep(self._delay)

        m, n, q = p.shape  # template height, width, channels

        x += n // 2  # aim for the template center
        y += m // 2

        self._click(x, y)

        max_val = 1 if repeat else 0
        while max_val > 0.8:
            if GameStatus().game_stage == GameStage.Stopped:
                return

            time.sleep(1)
            self.capture()
            res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val > 0.8:
                self._click(x, y)
Project: fatego-auto    Author: lishunan246    | project source | file source
def chances_of(self, name, loader=_imageLoader):
        self.capture()
        p = loader.get(name)
        res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        self.log('chances of ' + name + ': ' + str(max_val))
        return max_val
Project: awesome-opencv-scripts    Author: hs105    | project source | file source
def find_template(template):
    method = 'cv2.TM_CCOEFF'
    w, h = template.shape[::-1][1:] # reversed color-image shape is (c, w, h); keep (w, h)
    res = cv2.matchTemplate(img, template, eval(method))
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print("matching score is %f" % max_val)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return top_left, bottom_right, res, max_val
Project: sirbarksalot    Author: nmkridler    | project source | file source
def calculate_score(self, img):
        mf = cv2.matchTemplate(img.astype('float32'), self._template,
            cv2.TM_CCOEFF_NORMED)
        min_value, max_value, min_loc, max_loc = cv2.minMaxLoc(mf)
        return max_value
Project: KeyCat    Author: KatreMetsvahi    | project source | file source
def get_template_location(self, template, picture):
        picture = convert_picture_to_grayscale(picture)
        picture_array = convert_picture_to_numpy_array(picture)

        method = cv2.TM_CCOEFF_NORMED

        result = cv2.matchTemplate(picture_array, template, method)
        threshold = 0.8
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

        if max_val >= threshold:
            return max_loc
        else:
            return None
Project: ATX    Author: NetEaseGame    | project source | file source
def test_find_scene_by_tree():
    scenes = build_scene_tree()

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n))#, cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    def find_match(node, img):
        # for root node
        if node.parent is None:
            for k, v in node.iteritems():
                res = find_match(v, img)
                if res is not None:
                    return res
            return node

        # find in this node
        if node.tmpl is not None:
            s_bgr = cv2.split(node.tmpl) # Blue Green Red
            i_bgr = cv2.split(img)
            weight = (0.3, 0.3, 0.4)
            resbgr = [0, 0, 0]
            for i in range(3): # bgr
                resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
            match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]

            # match = cv2.matchTemplate(img, node.tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
            # found!
            if max_val > 0.7:
                x, y = max_loc
                h, w = node.tmpl.shape[:2]
                cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
                # find in children
                for k, v in node.iteritems():
                    res = find_match(v, img)
                    if res is not None: 
                        return res
                return node

    for name, img in imgs.items():
        cur = find_match(scenes, img)
        print('%20s %s' % (name, cur))
        cv2.imshow('img', img)
        cv2.waitKey()
Project: Stereo-Pose-Machines    Author: ppwwyyxx    | project source | file source
def match(self, im0, im1, hm0, hm1):
        viz = False
        mask0 = self.BG0.segment(im0)
        mask1 = self.BG1.segment(im1)

        im0 = im0 * (mask0>1e-10).astype('uint8')[:,:,np.newaxis]
        im1 = im1 * (mask1>1e-10).astype('uint8')[:,:,np.newaxis]

        if viz:
            viz0 = np.copy(im0)
            viz1 = np.copy(im1)
        pts14 = []
        for chan in range(14):
            h0 = cv2.resize(hm0[:,:,chan], (ORIG_SIZE, ORIG_SIZE))
            h1 = cv2.resize(hm1[:,:,chan], (ORIG_SIZE, ORIG_SIZE))
            y0, x0 = argmax_2d(h0)
            y1, x1 = argmax_2d(h1)

            target = take_patch(im0, y0, x0, PATCH_SIZE)
            region = take_patch(im1, y1, x1, REGION_SIZE)

            res = cv2.matchTemplate(region, target, cv2.TM_CCOEFF_NORMED)
            _, _, _, top_left = cv2.minMaxLoc(res)
            top_left = top_left[::-1]
            center_in_region = (top_left[0] + PATCH_SIZE, top_left[1] + PATCH_SIZE)
            center_in_im1 = (center_in_region[0] + y1-REGION_SIZE,
                    center_in_region[1] + x1-REGION_SIZE)

            if viz:
                cv2.circle(viz0, (x0,y0), 3, (0,0,255), -1)
                cv2.circle(viz1, tuple(center_in_im1[::-1]), 3, (0,0,255), -1)
            pts14.append([x0, y0, center_in_im1[1],center_in_im1[0]])
        if viz:
            mask0 = cv2.cvtColor(mask0, cv2.COLOR_GRAY2RGB).astype('uint8')
            mask1 = cv2.cvtColor(mask1, cv2.COLOR_GRAY2RGB).astype('uint8')
            viz = np.concatenate((mask0, viz0,viz1, mask1),axis=1)
            cv2.imshow("v", viz)
            cv2.waitKey(1)
        return np.array(pts14)

        #rv = np.copy(region)
        #rv[center_in_region[0],center_in_region[1]] = (0,0,255)
        #tv = cv2.resize(target, tuple(region.shape[:2][::-1]))

        #hv = np.zeros((region.shape), dtype='float32')
        #res = res - res.min()
        #res = res / res.max() * 255
        #res = cv2.cvtColor(res, cv2.COLOR_GRAY2RGB)
        #hv[PATCH_SIZE:PATCH_SIZE+res.shape[0],PATCH_SIZE:PATCH_SIZE+res.shape[1],:] = res
        #region = np.concatenate((region, rv, tv, hv), axis=1)
        #cv2.imwrite("patchmatch/region{}.png".format(chan), region)
Project: AutomatorX    Author: xiaoyaojjian    | project source | file source
def test_find_scene_by_tree():
    scenes = build_scene_tree()

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n))#, cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    def find_match(node, img):
        # for root node
        if node.parent is None:
            for k, v in node.iteritems():
                res = find_match(v, img)
                if res is not None:
                    return res
            return node

        # find in this node
        if node.tmpl is not None:
            s_bgr = cv2.split(node.tmpl) # Blue Green Red
            i_bgr = cv2.split(img)
            weight = (0.3, 0.3, 0.4)
            resbgr = [0, 0, 0]
            for i in range(3): # bgr
                resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
            match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]

            # match = cv2.matchTemplate(img, node.tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
            # found!
            if max_val > 0.7:
                x, y = max_loc
                h, w = node.tmpl.shape[:2]
                cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
                # find in children
                for k, v in node.iteritems():
                    res = find_match(v, img)
                    if res is not None: 
                        return res
                return node

    for name, img in imgs.items():
        cur = find_match(scenes, img)
        print('%20s %s' % (name, cur))
        cv2.imshow('img', img)
        cv2.waitKey()
Project: beryl    Author: DanielJDufour    | project source | file source
def click_image(image, notify=True):

    if notify:
        _notify("starting to click " + image)

    if isinstance(image, str):
        template = cv2.imread(image, 0)
    elif isinstance(image, PngImageFile):
        pass # need to convert to cv2 image type

    sleep(2)

    #GET SCREENSHOT
    call(["gnome-screenshot", "--file=/tmp/beryl.png"])
    sleep(1)

    #FIND LOCATION OF NAME
    source = cv2.imread('/tmp/beryl.png', 0)

    points = []
    w, h = template.shape[::-1]
    methods = [cv2.TM_CCOEFF,cv2.TM_CCOEFF_NORMED,cv2.TM_CCORR,cv2.TM_CCORR_NORMED,cv2.TM_SQDIFF,cv2.TM_SQDIFF_NORMED]
    for method in methods:
        # Apply Template Matching
        result = cv2.matchTemplate(source.copy(), template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

        #If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        #bottom_right = (top_left[0] + w, top_left[1] + h)
        # (x,y)
        point = (  top_left[0] + (float(w)/2), top_left[1] + (float(h)/2)  )
        points.append(point)

    best_point = sorted([(point, avg_distance(point, points)) for point in points], key=lambda tup: tup[1])[0][0]

    click_location(best_point)

    if notify:
        _notify("finished clicking image")