Python cv2 module: fillPoly() example source code

We have extracted the following 39 code examples from open-source Python projects to illustrate how to use cv2.fillPoly(). A minimal sketch of the basic call is shown first, followed by the project examples.

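Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic call: cv2.fillPoly() takes an image, a list of int32 polygons, and a fill color, and fills the polygons in place.

import numpy as np
import cv2

# blank 200x200 BGR canvas
canvas = np.zeros((200, 200, 3), dtype=np.uint8)
# one polygon as an Nx2 array of int32 points
triangle = np.array([[50, 150], [100, 40], [150, 150]], dtype=np.int32)
# fill the triangle with green; `canvas` is modified in place
cv2.fillPoly(canvas, [triangle], (0, 255, 0))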
Project: diy_driverless_car_ROS    Author: wilselby    | project source | file source
def render_lane(image, corners, ploty, fitx):

    _,  src,  dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts = np.vstack((fitx,ploty)).astype(np.int32).T

    # Draw the lane onto the warped blank image
    #plt.plot(left_fitx, ploty, color='yellow')
    cv2.polylines(color_warp,  [pts],  False,  (0, 255, 0),  10)
    #cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 

    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)

    return result
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | project source | file source
def roi(img,vertices):
    # blank mask:
    mask = np.zeros_like(img)

    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, 255)

    # returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
Project: motion-capture    Author: Vermeille    | project source | file source
def draw_landmarks(frame, lds):
    #cv2.rectangle(frame, (0, 0), (frame.shape[1], frame.shape[0]), (0, 0, 0), -1)
    # Make the eyebrows into a nightmare
    cv2.fillPoly(frame, [lds.get('left_eyebrow')], (68, 54, 39, 128))
    cv2.fillPoly(frame, [lds.get('right_eyebrow')], (68, 54, 39, 128))

    cv2.fillPoly(frame, [lds.get('left_eye')], (150, 54, 39, 128))
    cv2.fillPoly(frame, [lds.get('right_eye')], (150, 54, 39, 128))

    # Gloss the lips
    cv2.fillPoly(frame, [lds.get('top_lip')], (150, 0, 0, 128))
    cv2.fillPoly(frame, [lds.get('bottom_lip')], (150, 0, 0, 128))

    cv2.fillPoly(frame, [lds.get('nose_bridge')], (150, 0, 0, 128))
    cv2.fillPoly(frame, [lds.get('nose_tip')], (150, 0, 0, 128))
    cv2.polylines(frame, [lds.get('chin')], False, (150, 0, 0, 128))
Project: DoNotSnap    Author: AVGInnovationLabs    | project source | file source
def findEllipses(edges):
    contours, _ = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    ellipseMask = np.zeros(edges.shape, dtype=np.uint8)
    contourMask = np.zeros(edges.shape, dtype=np.uint8)

    pi_4 = np.pi * 4

    for i, contour in enumerate(contours):
        if len(contour) < 5:
            continue

        area = cv2.contourArea(contour)
        if area <= 100:  # skip ellipses smaller than 10x10
            continue

        arclen = cv2.arcLength(contour, True)
        circularity = (pi_4 * area) / (arclen * arclen)
        ellipse = cv2.fitEllipse(contour)
        poly = cv2.ellipse2Poly((int(ellipse[0][0]), int(ellipse[0][1])), (int(ellipse[1][0] / 2), int(ellipse[1][1] / 2)), int(ellipse[2]), 0, 360, 5)

        # if contour is circular enough
        if circularity > 0.6:
            cv2.fillPoly(ellipseMask, [poly], 255)
            continue

        # if contour has enough similarity to an ellipse
        similarity = cv2.matchShapes(poly.reshape((poly.shape[0], 1, poly.shape[1])), contour, cv2.cv.CV_CONTOURS_MATCH_I2, 0)
        if similarity <= 0.2:
            cv2.fillPoly(contourMask, [poly], 255)

    return ellipseMask, contourMask
Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def _get_mask(self, scan, slide, series):
        img, s, o, origShape = scan
        mask = np.zeros((origShape[1], origShape[2]))
        nodules = self._nodule_info[series]
        for nodule in nodules:
            iid, z, edges = nodule
            z = int((z - o[2])/s[2])
            if z == slide:
                if edges.shape[0] > 1:
                    cv.fillPoly(mask, [edges], 255)
                else:
                    #It's a small nodule. Make a circle of radius 3mm
                    edges = np.squeeze(edges)
                    center = tuple(edges)
                    radius = max(3.0/s[0], 3.0/s[1])
                    cv.circle(mask, center, int(radius+1), 255, -1)

        if img.shape[1] != origShape[1] or img.shape[2] != origShape[2]:
            mask = imu.resize_2d(mask, (img.shape[1], img.shape[2]))
        return mask
Project: SelfDrivingCar    Author: aguijarro    | project source | file source
def region_of_interest(img, vertices):
    """
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    # defining a blank mask to start with
    mask = np.zeros_like(img)
    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
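The docstring above describes the masking behavior; as an illustration, a hypothetical call (the image variable and the trapezoidal vertices are assumptions, chosen to mirror the outer-array-of-one-polygon convention used elsewhere on this page) might look like:

# hypothetical usage: keep only a trapezoid covering the lower half of `img`
h, w = img.shape[:2]
vertices = np.array([[(0, h), (w // 2 - 50, h // 2), (w // 2 + 50, h // 2), (w, h)]], dtype=np.int32)
roi = region_of_interest(img, vertices)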
Project: DocumentSegmentation    Author: SeguinBe    | project source | file source
def process_one(image_dir, page_dir, output_dir, basename, colormap, color_labels):
    image_filename = os.path.join(image_dir, "{}.jpg".format(basename))
    page_filename = os.path.join(page_dir, "{}.xml".format(basename))

    page = PAGE.parse_file(page_filename)
    text_lines = [tl for tr in page.text_regions for tl in tr.text_lines]
    graphic_regions = page.graphic_regions
    img = imread(image_filename, mode='RGB')

    gt = np.zeros_like(img[:, :, 0])
    mask1 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in text_lines if 'comment' in tl.id], 1)
    mask2 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in text_lines if not 'comment' in tl.id], 1)
    mask3 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in graphic_regions], 1)
    arr = np.dstack([mask1, mask2, mask3])

    gt_img = convert_array_masks(arr, colormap, color_labels)
    save_and_resize(img, os.path.join(output_dir, 'images', '{}.jpg'.format(basename)))
    save_and_resize(gt_img, os.path.join(output_dir, 'labels', '{}.png'.format(basename)), nearest=True)
Project: pycolor_detection    Author: parth1993    | project source | file source
def findSignificantContours(img, sobel_8u, sobel):
    image, contours, heirarchy = cv2.findContours(sobel_8u, \
                                                  cv2.RETR_EXTERNAL, \
                                                  cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    level1 = []
    for i, tupl in enumerate(heirarchy[0]):

        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = sobel_8u.size * 10 / 100
    for tupl in level1:
        contour = contours[tupl[0]];
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(mask, \
                             [contour], 0, (0, 255, 0), \
                             2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    significant.sort(key=lambda x: x[1])
    significant = [x[0] for x in significant];
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255, 0)
    mask = np.logical_not(mask)
    img[mask] = 0;

    return img
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def mask(self, size = (151, 151)):
    """Returns a binary mask for the worm shape

    Arguments:
      size (tuple or array): size of the mask

    Returns:
      array: mask of worm shape
    """

    mask = np.zeros(tuple(size));
    xyl, xyr, xym = self.sides();

    for i in range(self.npoints-1):
      poly = np.array([xyl[i,:], xyr[i,:], xyr[i+1,:], xyl[i+1,:]], dtype = np.int32)
      cv2.fillPoly(mask, [poly], 1);

    return np.asarray(mask, dtype = bool)
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def mask(self, size = (151, 151)):
    """Returns a binary mask for the worm shape

    Arguments:
      size (tuple or array): size of the mask

    Returns:
      array: mask of worm shape
    """

    mask = np.zeros(tuple(size));
    left, right = self.shape();

    for i in range(self.npoints-1):
      poly = np.array([left[i,:], right[i,:], right[i+1,:], left[i+1,:]], dtype = np.int32)
      cv2.fillPoly(mask, [poly], 1);

    return np.asarray(mask, dtype = bool)
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def mask(self, size = (151, 151)):
    """Returns a binary mask for the worm shape

    Arguments:
      size (tuple or array): size of the mask

    Returns:
      array: mask of worm shape
    """

    mask = np.zeros(tuple(size));
    left, right = self.shape();

    for i in range(self.npoints-1):
      poly = np.array([left[i,:], right[i,:], right[i+1,:], left[i+1,:]], dtype = np.int32)
      cv2.fillPoly(mask, [poly], 1);

    return np.asarray(mask, dtype = bool)
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def mask(self, size = (151, 151)):
    """Returns a binary mask for the worm shape

    Arguments:
      size (tuple or array): size of the mask

    Returns:
      array: mask of worm shape
    """

    mask = np.zeros(tuple(size));
    left, right = self.shape();

    for i in range(self.npoints-1):
      poly = np.array([left[i,:], right[i,:], right[i+1,:], left[i+1,:]], dtype = np.int32)
      cv2.fillPoly(mask, [poly], 1);

    return np.asarray(mask, dtype = bool)
Project: diy_driverless_car_ROS    Author: wilselby    | project source | file source
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)   

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
Project: diagnose-heart    Author: woshialex    | project source | file source
def load_contour(contour, img_path):
    filename = "IM-%s-%04d.dcm" % (SAX_SERIES[contour.case], contour.img_no)
    full_path = os.path.join(img_path, contour.case, filename)
    f = dicom.read_file(full_path)
    ctrs = np.loadtxt(contour.ctr_path, delimiter=" ").astype(np.int)
    label = np.zeros(f.pixel_array.shape, dtype=np.uint8)
    cv2.fillPoly(label, [ctrs], 255)
    img,lab = getAlignImg(f,label);
    lx,ly = img.shape;
    assert(lx==ly);
    xm,ym = np.where(lab>127);
    if xm.size<30:
        xm,ym = lx//2,ly//2;
    xm = np.mean(xm);
    ym = np.mean(ym);
    delta = int(lx*0.62)//2;#cut middle 160x160 from 256x256 for sunny brook data
    assert(delta<xm and delta<ym);
    xm,ym,delta = int(xm),int(ym),int(delta);
    img = img[xm-delta:xm+delta,ym-delta:ym+delta];
    lab = lab[xm-delta:xm+delta,ym-delta:ym+delta];
    return cv2.resize(img, (SZ,SZ)), cv2.resize(lab, (SZ,SZ))
Project: perception    Author: BerkeleyAutomation    | project source | file source
def contour_mask(self, contour):
        """ Generates a binary image with only the given contour filled in. """
        # fill in new data
        new_data = np.zeros(self.data.shape)
        num_boundary = contour.boundary_pixels.shape[0]
        boundary_px_ij_swapped = np.zeros([num_boundary, 1, 2])
        boundary_px_ij_swapped[:, 0, 0] = contour.boundary_pixels[:, 1]
        boundary_px_ij_swapped[:, 0, 1] = contour.boundary_pixels[:, 0]
        cv2.fillPoly(
            new_data, pts=[
                boundary_px_ij_swapped.astype(
                    np.int32)], color=(
                BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL))
        orig_zeros = np.where(self.data == 0)
        new_data[orig_zeros[0], orig_zeros[1]] = 0
        return BinaryImage(new_data.astype(np.uint8), frame=self._frame)
Project: Self-Driving-Car-ND-Predict-Steering-Angle-with-CV    Author: sjamthe    | project source | file source
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
Project: DocumentSegmentation    Author: SeguinBe    | project source | file source
def process_one(image_filename, output_dir, basename):
    page = PAGE.parse_file(get_page_filename(image_filename))
    text_lines = [tl for tr in page.text_regions for tl in tr.text_lines]
    img = imread(image_filename, mode='RGB')
    gt = np.zeros_like(img)
    cv2.fillPoly(gt, [PAGE.Point.list_to_cv2poly(tl.coords) for tl in text_lines], DRAWING_COLOR)
    save_and_resize(img, os.path.join(output_dir, 'images', '{}.jpg'.format(basename)))
    save_and_resize(gt, os.path.join(output_dir, 'labels', '{}.png'.format(basename)), nearest=True)

    classes = np.stack([(0, 0, 0), DRAWING_COLOR])
    np.savetxt(os.path.join(output_dir, 'classes.txt'), classes, fmt='%d')
Project: Dstl-Satellite-Imagery-Feature-Detection    Author: DeepVoltaire    | project source | file source
def _plot_mask_from_contours(raster_img_size, contours, class_value=1):
    """
    Creates a class mask (0 and 1s) from lists of exterior and interior polygon coordinates.
    """
    img_mask = np.zeros(raster_img_size, np.uint8)
    if contours is None:
        return img_mask
    perim_list, interior_list = contours
    cv2.fillPoly(img_mask, perim_list, class_value)
    cv2.fillPoly(img_mask, interior_list, 0)
    return img_mask
Project: Simple-Lane-Detection-System    Author: shivamsardana    | project source | file source
def region_of_interest(img, vertices):
    """

    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """

    # defining a blank mask to start with

    mask = np.zeros_like(img)

    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image

    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    # filling pixels inside the polygon defined by "vertices" with the fill color

    cv2.fillPoly(mask, vertices, ignore_mask_color)

    # returning the image only where mask pixels are nonzero

    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
Project: imgProcessor    Author: radjkarl    | project source | file source
def visualize(hog, grid=(10, 10), radCircle=None):
    '''
    visualize HOG as polynomial around cell center
        for [grid] * cells
    '''
    s0, s1, nang = hog.shape
    angles = np.linspace(0, np.pi, nang + 1)[:-1]
    # center of each sub array:
    cx, cy = s0 // (2 * grid[0]), s1 // (2 * grid[1])
    # max. radius of polynomial around center:
    rx, ry = cx, cy
    # for drawing a position indicator (circle):
    if radCircle is None:
        radCircle = max(1, rx // 10)
    # output array:
    out = np.zeros((s0, s1), dtype=np.uint8)
    # point of polynomial:
    pts = np.empty(shape=(1, 2 * nang, 2), dtype=np.int32)
    # takes grid[0]*grid[1] sample HOG values:
    samplesHOG = subCell2DFnArray(hog, lambda arr: arr[cx, cy], grid)
    mxHOG = samplesHOG.max()
    # sub array slices:
    slices = list(subCell2DSlices(out, grid))
    m = 0
    for m, hhh in enumerate(samplesHOG.reshape(grid[0] * grid[1], nang)):
        hhmax = hhh.max()
        hh = hhh / hhmax
        sout = out[slices[m][2:4]]
        for n, (o, a) in enumerate(zip(hh, angles)):
            pts[0, n, 0] = cx + np.cos(a) * o * rx
            pts[0, n, 1] = cy + np.sin(a) * o * ry
            pts[0, n + nang, 0] = cx + np.cos(a + np.pi) * o * rx
            pts[0, n + nang, 1] = cy + np.sin(a + np.pi) * o * ry

        cv2.fillPoly(sout, pts, int(255 * hhmax / mxHOG))
        cv2.circle(sout, (cx, cy), radCircle, 0, thickness=-1)

    return out
Project: dataArtist    Author: radjkarl    | project source | file source
def _createMaskFromSelection(self):
        img = self.display.widget.image
        assert img is not None, 'need image defined'

        out = np.zeros(img.shape[1:3], dtype=np.uint8)
        for n, p in enumerate(self.paths):
            assert isinstance(
                p, FreehandItem), 'TODO: make work for other items as well'
            cv2.fillPoly(out, np.array([p.elements()], dtype=np.int32), n + 1)

        self.handleOutput([out.T], title='selection')
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def mask_for_polygons(
        im_size: Tuple[int, int], polygons: MultiPolygon) -> np.ndarray:
    """ Return numpy mask for given polygons.
    polygons should already be converted to image coordinates.
    """
    img_mask = np.zeros(im_size, np.uint8)
    if not polygons:
        return img_mask
    int_coords = lambda x: np.array(x).round().astype(np.int32)
    exteriors = [int_coords(poly.exterior.coords) for poly in polygons]
    interiors = [int_coords(pi.coords) for poly in polygons
                 for pi in poly.interiors]
    cv2.fillPoly(img_mask, exteriors, 1)
    cv2.fillPoly(img_mask, interiors, 0)
    return img_mask
Project: histonets-cv    Author: sul-cidr    | project source | file source
def get_mask_polygons(polygons, height, width):
    """Turn a list of polygons into a mask image of height by width.
    Each polygon is expressed as a list of [x, y] points."""
    mask = np.zeros((height, width), dtype=np.ubyte)
    cv2.fillPoly(mask, np.int32(polygons), color=255)
    return mask
Project: diagnose-heart    Author: woshialex    | project source | file source
def load_contour(contour, img_path):
    filename = 'IM-%s-%04d.dcm' % (sax_series_dict[contour.case], contour.img_no)
    full_path = os.path.join(img_path, contour.case, filename)
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype(np.uint8)
    ctrs = np.loadtxt(contour.ctr_path, delimiter=' ').astype(np.int)
    label = np.zeros_like(img, dtype='uint8')
    cv2.fillPoly(label, [ctrs], 1)
    return cv2.resize(img, (img_size,img_size)), cv2.resize(label, (img_size,img_size))
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: alno    | project source | file source
def plot_contours(raster_size, contours, class_value=1):
    # __author__ = visoft
    # https://www.kaggle.com/visoft/dstl-satellite-imagery-feature-detection/export-pixel-wise-mask
    img_mask = np.zeros(raster_size, np.uint8)

    if contours is None:
        return img_mask

    perim_list, interior_list = contours
    cv2.fillPoly(img_mask, perim_list, class_value)
    cv2.fillPoly(img_mask, interior_list, 0)

    return img_mask
Project: pictureflow    Author: mentum    | project source | file source
def apply(self, item, pth):

        item.id += f'-{self.id}'
        img = item.img_mat

        mask = np.zeros(img.shape, np.uint8)
        channel_count = img.shape[2]

        ignore_mask_color = (255,) * channel_count
        cv2.fillPoly(mask, [pth], ignore_mask_color)

        item.img_mat = cv2.bitwise_and(img, mask)
        yield item
Project: CarLaneDetection    Author: leftthomas    | project source | file source
def roi_mask(img, vertices):
    mask = np.zeros_like(img)

    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        mask_color = (255,) * channel_count
    else:
        mask_color = 255

    cv2.fillPoly(mask, vertices, mask_color)
    masked_img = cv2.bitwise_and(img, mask)
    return masked_img
Project: SDcarsLaneDetection    Author: Nazanin1369    | project source | file source
def houghTransformAndRegionSelect(image, edges):
    rho = 1
    theta = np.pi/180
    threshold = 1
    min_line_length = 5
    max_line_gap = 3


    # Next we'll create a masked edges image using cv2.fillPoly()
    mask = np.zeros_like(edges)
    ignore_mask_color = 255

    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    vertices = np.array([[(0,imshape[0]),(450, 290), (490, 290), (imshape[1],imshape[0])]], dtype=np.int32)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    masked_edges = cv2.bitwise_and(edges, mask)

    line_image = np.copy(image)*0

    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)

    # Iterate over the output "lines" and draw lines on a blank image
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)

    # Create a "color" binary image to combine with line image
    color_edges = np.dstack((edges, edges, edges))

    # Draw the lines on the edge image
    lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)

    return lines_edges
Project: pygta5    Author: Sentdex    | project source | file source
def roi(img, vertices):
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, 255)
    masked = cv2.bitwise_and(img, mask)
    return masked
Project: pygta5    Author: Sentdex    | project source | file source
def roi(img, vertices):

    #blank mask:
    mask = np.zeros_like(img)   

    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, 255)

    #returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
Project: pygta5    Author: Sentdex    | project source | file source
def roi(img, vertices):

    #blank mask:
    mask = np.zeros_like(img)   

    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, 255)

    #returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
Project: pygta5    Author: Sentdex    | project source | file source
def roi(img, vertices):

    #blank mask:
    mask = np.zeros_like(img)   

    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, 255)

    #returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
Project: pygta5    Author: Sentdex    | project source | file source
def roi(img, vertices):
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, 255)
    masked = cv2.bitwise_and(img, mask)
    return masked
Project: LVNet    Author: fperdigon    | project source | file source
def load_contour(contour, img_path):
    filename = "IM-%s-%04d.dcm" % (SAX_SERIES[contour.case], contour.img_no)
    full_path = os.path.join(img_path, contour.case, filename)
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype(np.int)
    ctrs = np.loadtxt(contour.ctr_path, delimiter=" ").astype(np.int)
    label = np.zeros_like(img, dtype="uint8")
    cv2.fillPoly(label, [ctrs], 1)
    return img, label
Project: img_classifier_prepare    Author: zonekey    | project source | file source
def maskit(fname):
    m = cv.imread(fname)
    cv.fillPoly(m, [np.array(poly)], BACK_PIX)
    nfname = 'masked-' + fname
    cv.imwrite(nfname, m)
Project: DelaunayVisualization-FacialWarp    Author: sneha-belkhale    | project source | file source
def drawColoredTriangles(img, triangleList, disp):
    #sort the triangle list by distance from the top left corner in order to get a gradient effect when drawing triangles
    triangleList=sorted(triangleList, cmp=triDistanceSort)
    h, w, c = img.shape
    #get bounding rectangle points of image
    r = (0, 0, w, h)
    #iterate through and draw all triangles in the list
    for idx, t in enumerate(triangleList):
        #grab individual vertex points
        pt1 = [t[0], t[1]]
        pt2 = [t[2], t[3]]
        pt3 = [t[4], t[5]]
        #select a position for displaying the enumerated triangle value
        pos = (t[2], t[3])
        #create the triangle
        triangle = np.array([pt1, pt2, pt3], np.int32)
        #select a color in HSV!! (manipulate idx for cool color gradients)
        color = np.uint8([[[idx, 100, 200]]])
        #color = np.uint8([[[0, 0, idx]]])
        #convert color to BGR
        bgr_color = cv2.cvtColor(color, cv2.COLOR_HSV2BGR)
        color = (int(bgr_color[(0, 0, 0)]), int(bgr_color[(0, 0, 1)]), int(bgr_color[(0, 0, 2)]))

        #draw the triangle if it is within the image bounds
        if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3):
            cv2.fillPoly(img, [triangle], color)
            # if display triangle number was selected, display the number.. this helps with triangle manipulation later
            if(disp==1):
                cv2.putText(img, str(idx), pos, fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale=0.3, color=(0, 0, 0))





######################################## example script ########################################
Project: autonomous_driving    Author: StatueFungus    | project source | file source
def define_roi(self, image, above=0.0, below=0.0, side=0.0):
        '''
            Blacks out image regions that are not of interest.

            Parameters
            ---------
            image : the image to be masked
            above (optional) : Float
                Percentage of the top of the image to black out.
                Default value is 0.0
                >> 1.0 corresponds to 100%
            below (optional) : Float
                Percentage of the bottom of the image to black out.
                Default value is 0.0
                >> 1.0 corresponds to 100%
            side (optional) : Float
                Percentage of the sides of the image to black out.
                The sides are not masked straight down but trapezoidally
                towards the upper masked image border (above).
                Default value is 0.0
                >> 1.0 corresponds to 100%

            Returns
            ---------
            image : the masked image

        '''
        height, width, channels = image.shape
        color_black = (0, 0, 0)
        # mask the lower portion of the image
        image[height - int((height*below)):height, :] = color_black
        # define polygon points and mask the upper and side portions of the image
        pts = np.array([[0, 0], [0, int(height*(above+0.15))], [int(width*side), int(height*above)], [width-int(width*side), int(height*above)], [width, int(height*(above+0.15))], [width, 0]], np.int32)
        cv2.fillPoly(image, [pts], color_black)
        return image
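As a usage note, a hypothetical call on an instance of the surrounding class (the instance and frame names, and the percentages, are assumptions) could be:

# hypothetical usage: black out the top 40%, bottom 10%, and 20% of each side
roi_frame = detector.define_roi(frame.copy(), above=0.4, below=0.1, side=0.2)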
Project: tensorflow_ocr    Author: BowieHsu    | project source | file source
def generate_rbox(im_size, polys, tags):
    h, w = im_size
    poly_mask = np.zeros((h, w), dtype=np.uint8)
    score_map = np.zeros((h, w), dtype=np.uint8)
    geo_map = np.zeros((h, w, 8), dtype=np.float32)
    # mask used during traning, to ignore some hard areas
    training_mask = np.ones((h, w), dtype=np.uint8)
    for poly_idx, poly_tag in enumerate(zip(polys, tags)):
        poly = poly_tag[0]
        tag = poly_tag[1]

        r = [None, None, None, None]
        for i in range(4):
            r[i] = min(np.linalg.norm(poly[i] - poly[(i + 1) % 4]),
                       np.linalg.norm(poly[i] - poly[(i - 1) % 4]))
        # score map
        # shrinked_poly = shrink_poly(poly.copy(), r).astype(np.int32)[np.newaxis, :, :]

        # close shrink function
        shrinked_poly = poly.astype(np.int32)[np.newaxis, :,:]

        cv2.fillPoly(score_map, shrinked_poly, 1)
        cv2.fillPoly(poly_mask, shrinked_poly, poly_idx + 1)
        # if the poly is too small, then ignore it during training
        poly_h = min(np.linalg.norm(poly[0] - poly[3]), np.linalg.norm(poly[1] - poly[2]))
        poly_w = min(np.linalg.norm(poly[0] - poly[1]), np.linalg.norm(poly[2] - poly[3]))
        if min(poly_h, poly_w) < FLAGS.min_text_size:
            cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
        if tag:
            cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)

        xy_in_poly = np.argwhere(poly_mask == (poly_idx + 1))

        for y, x in xy_in_poly:
            point = np.array([x, y], dtype=np.int32)

            # left
            geo_map[y, x, 0] = valid_link(point, score_map, w, h,'left')
            # left_down
            geo_map[y, x, 1] = valid_link(point, score_map, w, h, 'left_down')
            # left_up
            geo_map[y, x, 2] = valid_link(point, score_map, w, h, 'left_up')
            # right
            geo_map[y, x, 3] = valid_link(point, score_map, w, h,'right')
            # right_down
            geo_map[y, x, 4] = valid_link(point, score_map, w, h,'right_down')
            # right_up
            geo_map[y, x, 5] = valid_link(point, score_map, w, h, 'right_up')
            # up
            geo_map[y, x, 6] = valid_link(point, score_map, w, h, 'up')
            # down
            geo_map[y, x, 7] = valid_link(point, score_map, w, h, 'down')

    return score_map, geo_map, training_mask
Project: lane-detection-raspberry-pi    Author: uvbakutan    | project source | file source
def project_on_road(self, image_input):
        image = image_input[self.remove_pixels:, :]
        image = self.trans_per(image)
        self.im_shape = image.shape
        self.get_fit(image)

        if self.detected_first & self.detected:
            # create fill image
            temp_filler = np.zeros((self.remove_pixels,self.im_shape[1])).astype(np.uint8)
            filler = np.dstack((temp_filler,temp_filler,temp_filler))

            # create an image to draw the lines on
            warp_zero = np.zeros_like(image).astype(np.uint8)
            color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

            ploty = np.linspace(0, image_input.shape[0]-1, image_input.shape[0] )
            left_fitx = self.best_fit_l[0]*ploty**2 + self.best_fit_l[1]*ploty + self.best_fit_l[2]
            right_fitx = self.best_fit_r[0]*ploty**2 + self.best_fit_r[1]*ploty + self.best_fit_r[2]

            # recast the x and y points into usable format for cv2.fillPoly()
            pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
            pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
            pts = np.hstack((pts_left, pts_right))

            # draw the lane onto the warped blank image
            cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

            # warp the blank back to original image space using inverse perspective matrix (Minv)
            newwarp = cv2.warpPerspective(color_warp, self.Minv, color_warp.shape[-2:None:-1])
            left_right = cv2.warpPerspective(self.left_right, self.Minv, color_warp.shape[-2:None:-1])
            # combine the result with the original image
            left_right_fill = np.vstack((filler,left_right)) 
            result = cv2.addWeighted(left_right_fill,1, image_input, 1, 0)
            result = cv2.addWeighted(result, 1, np.vstack((filler,newwarp)), 0.3, 0)


            # get curvature and offset
            self.calculate_curvature_offset()

            # plot text on resulting image
            img_text = "radius of curvature: " + str(round((self.left_curverad + self.right_curverad)/2,2)) + ' (m)'

            if self.offset< 0:
                img_text2 = "vehicle is: " + str(round(np.abs(self.offset),2)) + ' (m) left of center'
            else:
                img_text2 = "vehicle is: " + str(round(np.abs(self.offset),2)) + ' (m) right of center'

            result2 = cv2.resize(result, (0,0), fx=self.enlarge, fy=self.enlarge)

            cv2.putText(result2,img_text, (15,15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,255,255),1)
            cv2.putText(result2,img_text2,(15,40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,255,255),1)

            return result2

        # if lanes were not detected output source image
        else:
            return cv2.resize(image_input,(0,0), fx=self.enlarge, fy=self.enlarge)