Python cv2 module: pointPolygonTest() example source code

The following 9 code examples, extracted from open-source Python projects, illustrate how to use cv2.pointPolygonTest().
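
Before the project snippets, here is a minimal standalone sketch (not taken from any of the projects below) showing the return convention: with measureDist=False, cv2.pointPolygonTest() returns +1.0, -1.0 or 0.0 for a point inside, outside or on the contour; with measureDist=True it returns the signed distance from the point to the nearest contour edge.

import cv2
import numpy as np

# a simple axis-aligned square contour
square = np.array([[10, 10], [10, 100], [100, 100], [100, 10]], dtype=np.int32)

print(cv2.pointPolygonTest(square, (50, 50), False))  # 1.0  -> inside
print(cv2.pointPolygonTest(square, (5, 5), False))    # -1.0 -> outside
print(cv2.pointPolygonTest(square, (10, 50), False))  # 0.0  -> on the edge
print(cv2.pointPolygonTest(square, (50, 50), True))   # 40.0 -> 40 px inside (distance to nearest edge)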

Project: hand-gesture-recognition-opencv    Author: mahaveerverma    | project source | file source
def mark_hand_center(frame_in,cont):
    max_d=0
    pt=(0,0)
    x,y,w,h = cv2.boundingRect(cont)
    for ind_y in range(int(y+0.3*h),int(y+0.8*h)): # scan roughly the 0.3 to 0.8 region of the height (faster, with acceptable results)
        for ind_x in range(int(x+0.3*w),int(x+0.6*w)): # scan roughly the 0.3 to 0.6 region of the width (faster, with acceptable results)
            dist= cv2.pointPolygonTest(cont,(ind_x,ind_y),True) # signed distance to the contour; largest positive value = point deepest inside
            if(dist>max_d):
                max_d=dist
                pt=(ind_x,ind_y)
    if(max_d>radius_thresh*frame_in.shape[1]): # radius_thresh is a module-level constant defined elsewhere in the project
        thresh_score=True
        cv2.circle(frame_in,pt,int(max_d),(255,0,0),2)
    else:
        thresh_score=False
    return frame_in,pt,max_d,thresh_score

# 6. Find and display gesture
Project: FaceSwap    Author: MarekKowalski    | project source | file source
def blendImages(src, dst, mask, featherAmount=0.2):
    # indices of the non-black pixels of the mask
    maskIndices = np.where(mask != 0)
    # the same indices, but stacked into one matrix where each row is one pixel (x, y)
    maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis]))
    faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0)
    featherAmount = featherAmount * np.max(faceSize)

    hull = cv2.convexHull(maskPts)
    dists = np.zeros(maskPts.shape[0])
    for i in range(maskPts.shape[0]):
        dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True)

    weights = np.clip(dists / featherAmount, 0, 1)

    composedImg = np.copy(dst)
    composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]]

    return composedImg

# note: here src is the image from which the colour will be taken
Project: srcsim2017    Author: ZarjRobotics    | project source | file source
def find_intersect(image, contours, row, direction, center_col=None):
    """
    Find the intersection from a given centerline to the first
    contour hit while scanning left or right along the given row.
    """
    if center_col is not None:
        col = center_col
    else:
        col = image.shape[1] // 2  # integer centre column (the original Python 2 code used /)
    intersect = None
    i_contour = None
    while intersect is None:
        for i, contour in enumerate(contours):
            if cv2.pointPolygonTest(contour, (col, row), False) >= 0:  # >= 0: inside or on the contour
                intersect = col
                i_contour = i
                break
        col = col + direction
        if col < 0 or col > image.shape[1]:
            break

    return i_contour, intersect
Project: srcsim2017    Author: ZarjRobotics    | project source | file source
def forward_intersect(image, contours, center_col=None):
    """
    Find whether a contour intersects the centre column, scanning rows
    forward from the top of the image downwards.
    """
    if center_col is not None:
        col = center_col
    else:
        col = image.shape[1] // 2  # integer centre column (the original Python 2 code used /)
    intersect = None
    i_contour = None
    row = 0
    while intersect is None:
        for i, contour in enumerate(contours):
            if cv2.pointPolygonTest(contour, (col, row), False) >= 0:  # >= 0: inside or on the contour
                intersect = row
                i_contour = i
                break
        row += 1
        if row > image.shape[0]:
            break

    if intersect is None:
        intersect = image.shape[0]
    return {'contour': i_contour, 'distance': intersect}
Project: unet-tensorflow    Author: timctho    | project source | file source
def CloseInContour(mask, element):
    result = mask
    # OpenCV 3.x API: findContours returns three values (drop the first one under OpenCV 4.x)
    _, contours, _ = cv2.findContours(result, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # find the contour with the biggest area
    c = max(contours, key=cv2.contourArea)

    closing = cv2.morphologyEx(result, cv2.MORPH_CLOSE, element)
    for x in range(mask.shape[0]):      # x iterates over rows
        for y in range(mask.shape[1]):  # y iterates over columns
            # pointPolygonTest expects the point as (column, row)
            pt = cv2.pointPolygonTest(c, (y, x), True)
            if pt > 3:  # apply the morphological closing only well inside the contour
                result[x][y] = closing[x][y]
    return result.astype(np.float32)
Project: unet-tensorflow    Author: timctho    | project source | file source
def CloseInContour(mask, element):
    result = mask
    # OpenCV 3.x API: findContours returns three values (drop the first one under OpenCV 4.x)
    _, contours, _ = cv2.findContours(result, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # find the contour with the biggest area
    c = max(contours, key=cv2.contourArea)

    closing = cv2.morphologyEx(result, cv2.MORPH_CLOSE, element)
    for x in range(mask.shape[0]):      # x iterates over rows
        for y in range(mask.shape[1]):  # y iterates over columns
            # pointPolygonTest expects the point as (column, row)
            pt = cv2.pointPolygonTest(c, (y, x), False)
            if pt == 1:  # strictly inside the contour
                result[x][y] = closing[x][y]
    return result.astype(np.float32)
Project: 2017-Vision    Author: RoboticsTeam4904    | project source | file source
def distToPolygon(contour, polygon):
    # average absolute distance from each contour point to the polygon boundary
    tests = [cv2.pointPolygonTest(polygon, (point[0][0], point[0][1]), True) for point in contour]
    return np.average(np.absolute(tests))
Project: Vec-Lib    Author: vladan-jovicic    | project source | file source
def find_color(self):
        contour = np.array(self.contour)
        # pdb.gimp_message(str(contour))
        if len(contour) <= 1:
            return 0, 0, 0

        # probe the 8 neighbouring directions around each contour point
        dirx = [1, 1, 1, 0, 0, -1, -1, -1]
        diry = [1, 0, -1, 1, -1, 1, 0, -1]

        s_area = cv2.contourArea(contour, True)  # signed (oriented) area; currently unused
        possible_colors = {}
        for point in contour:
            for dx, dy in zip(dirx, diry):
                new_cx, new_cy = int(point[0] + dx), int(point[1] + dy)
                # try it, try it
                # pdb.gimp_message("before polygon test")
                # dist = self.is_point_inside([new_cx, new_cy], index)
                dist = cv2.pointPolygonTest(contour, (new_cx, new_cy), True)
                # check the orientation of contour

                # pdb.gimp_message("after polygon test: " + str(dist))
                if dist > 0:
                    # pdb.gimp_message("Point " + str((new_cx, new_cx)) + " is inside")
                    # voila
                    # pdb.gimp_message("we have just to check the color")

                    b, g, r = self.image[new_cy, new_cx]
                    if (b, g, r) in possible_colors.keys():
                        possible_colors[(b, g, r)] += 1
                    else:
                        possible_colors[(b, g, r)] = 1
                # return self.image[new_cx, new_cy]

        max_occ, majority_color = 0, (0, 0, 0)
        for key, val in possible_colors.items():
            # pdb.gimp_message("Color " + str(key) + " appears " + str(val))
            if val > max_occ:
                max_occ, majority_color = val, key

        return majority_color[2], majority_color[1], majority_color[0]
Project: piwall-cvtools    Author: infinnovation    | project source | file source
def classify_monitor_contour_set(contours):
    '''Not a general-purpose function: assumes a set of strongly related contours belonging to one monitor.'''
    # First pass : compute the center of mass of every contour
    classified = {}
    for (i,c) in enumerate(contours):
        classified[i] = {}
        classified[i]['contour'] = c
        M = cv2.moments(c)  # image moments, used for the centre of mass below
        classified[i]['com'] = (int(M['m10']/M['m00']), int(M['m01']/M['m00']))
        rect = contour_to_monitor_coords(c)
        (maxWidth, maxHeight, dest, Mwarp) = compute_warp(rect)
        classified[i]['rect'] = rect
        classified[i]['maxWidth'] = maxWidth
        classified[i]['maxHeight'] = maxHeight
        classified[i]['dest'] = dest
        classified[i]['Mwarp'] = Mwarp
    # Second pass : establish if c-o-m of every contour is within the first contour
    reference_contour = contours[0]
    for (i,c) in enumerate(contours):
        classified[i]['coherent'] = cv2.pointPolygonTest(reference_contour, classified[i]['com'], False)
    # Final pass : report on the set
    print('$'*80)
    for (i,c) in enumerate(contours):
        print('%d : c-o-m %s : coherent : %d mw %d mh %d' % (i,
                                                             classified[i]['com'],
                                                             classified[i]['coherent'],
                                                             classified[i]['maxWidth'],
                                                             classified[i]['maxHeight'],
        ))
    print('$'*80)
    # From the contours coherent to the reference contour, build an average/best estimator
    count = 0
    rect = np.zeros((4, 2), dtype = "float32")            
    for (i,c) in enumerate(contours):
        if classified[i]['coherent'] == 1:
            count += 1
            for j in range(0,4):
                rect[j] += classified[i]['rect'][j]
    #pdb.set_trace()
    for j in range(0,4):
        # BUG to show Alison
        # rect[j] = (rect[j]/1.0*count).astype('uint8')
        rect[j] = (rect[j]/(1.0*count)).astype('uint32')
    time.sleep(2.5)
    return rect