Python cv2 module: HoughLinesP() example source code

We extracted the following 22 code examples from open-source Python projects to show how cv2.HoughLinesP() is used.
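Before the project samples, here is a minimal, self-contained sketch of a typical cv2.HoughLinesP() pipeline. The file name 'road.jpg' and all parameter values are illustrative assumptions, not code taken from the projects below.

import cv2
import numpy as np

img = cv2.imread('road.jpg')                      # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)      # HoughLinesP works on a single-channel image
edges = cv2.Canny(gray, 50, 150)                  # binary edge map from the Canny detector

# rho/theta set the accumulator resolution (1 px, 1 degree); threshold is the minimum vote count.
lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi/180, threshold=50,
                        minLineLength=30, maxLineGap=10)

if lines is not None:
    for x1, y1, x2, y2 in lines.reshape(-1, 4):   # each detected segment is [x1, y1, x2, y2]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imwrite('lines.jpg', img)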

Project: bib-tagger    Author: KateRita    | Project source | File source
def find_lines(img):
  edges = cv2.Canny(img,100,200)
  threshold = 60
  minLineLength = 10
  lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold, minLineLength=minLineLength, maxLineGap=20)
  if (lines is None or len(lines) == 0):
      return

  #print lines
  for line in lines[0]:
    #print line
    cv2.line(img, (line[0],line[1]), (line[2],line[3]), (0,255,0), 2)
  cv2.imwrite("line_edges.jpg", edges)
  cv2.imwrite("lines.jpg", img)
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((*img.shape, 3), dtype = np.uint8)
    draw_lines(line_img, lines)
    return line_img


Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | Project source | File source
def process_img(img):
    original_image=img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
    copy=processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    #                       edges
    #lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180,np.array([]), 3, 2)
    #draw_lines(processed_img,lines)
    #draw_lines(original_image,lines)

    #Platform lines
    #imgray = cv2.cvtColor(platform,cv2.COLOR_BGR2GRAY)
    ret,thresh = cv2.threshold(platform,127,255,0)
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0,255,0), 3)
    try:
        platformpos=contours[0][0][0]
    except:
        platformpos=[[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)

    ballpos=draw_circles(original_image,circles=circles)

    return processed_img,original_image,platform,platformpos,ballpos
Project: SDcarsLaneDetection    Author: Nazanin1369    | Project source | File source
def houghTransform(image, edges):
    rho = 2
    theta = np.pi/180
    threshold = 15
    min_line_length = 40
    max_line_gap = 20

    line_image = np.copy(image)*0 #creating a blank to draw lines on

    # Run Hough on edge detected image
    lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)

    # Iterate over the output "lines" and draw lines on the blank
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)

    # Create a "color" binary image to combine with line image
    color_edges = np.dstack((edges, edges, edges))

    # Draw the lines on the edge image
    combo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)

    return combo
Project: hazcam    Author: alex-sherman    | Project source | File source
def update_edge_mask(self, previous_mask, previous_line, slope_sign, thrs1, thrs2, debug):
        lines = cv2.HoughLinesP(self.edge, 1, np.pi / 180, 70, minLineLength = 10, maxLineGap = 200)
        lines = filter_lines(lines, self.vanishing_height, self.edge.shape[0], slope_sign)
        self.lines.extend(lines)
        mask = np.zeros(self.edge.shape, np.uint8)
        for line in lines:
            x1,y1,x2,y2 = line
            cv2.line(mask, (x1,y1),(x2,y2), 255, MASK_WIDTH)
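        # Blend the fresh line mask with the previous frame's mask (simple exponential smoothing over time).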
        mask = cv2.addWeighted(mask, MASK_WEIGHT, previous_mask, 1 - MASK_WEIGHT, 0)
        #self.current_mask *= int(255.0 / self.current_mask.max())
        previous_mask = mask.copy()
        _, mask = cv2.threshold(mask, 40, 255, cv2.THRESH_BINARY)
        masked_edges = cv2.morphologyEx(cv2.bitwise_and(self.edge, self.edge, mask = mask), cv2.MORPH_CLOSE, np.array([[1] * EDGE_DILATION] *EDGE_DILATION))
        lines2 = cv2.HoughLinesP(masked_edges, 1, np.pi / 180, 70, minLineLength = 10, maxLineGap = 200)
        lines2 = filter_lines(lines2, self.vanishing_height, self.edge.shape[0], slope_sign)
        self.lines2.extend(lines2)
        for line in lines2:
            x1,y1,x2,y2 = line
            cv2.line(mask, (x1,y1),(x2,y2), 255, MASK_WIDTH)
            previous_line[0] = add(previous_line[0], (x2,y2))
            previous_line[1] = add(previous_line[1], (x_at_y(self.edge.shape[0]*0.6, x1, y1, x2, y2), self.edge.shape[0]*0.6))
        previous_line[0] = scale(previous_line[0], 1.0 / (len(lines2) + 1))
        previous_line[1] = scale(previous_line[1], 1.0 / (len(lines2) + 1))
        return masked_edges, mask, previous_mask, previous_line
Project: Simple-Lane-Detection-System    Author: shivamsardana    | Project source | File source
def detect_lines(img_canny_masked):

    """ Runs the Hough transform to detect lines in the input image"""
    # Apply HoughLines to extract lines

    rho_res = .1 # [pixels]
    theta_res = np.pi / 180.  # [radians]
    threshold = 7  # [# votes]
    min_line_length = 11 # [pixels]
    max_line_gap = 1  # [pixels]
    lines = cv2.HoughLinesP(img_canny_masked, rho_res, theta_res, threshold, np.array([]),
                            minLineLength=min_line_length, maxLineGap=max_line_gap)
    return lines
Project: AutonomousParking    Author: jovanduy    | Project source | File source
def hough_lines(self):
       """ This function uses the Hough Line Transform function to identify and visualize lines in our binary image."""

       lines = cv2.HoughLinesP(self.binary_image, rho=5, theta=np.deg2rad(10), threshold=100, minLineLength=25, maxLineGap=0)
       lines_filtered = []
       if not lines is None:
            for x1,y1,x2,y2 in lines[0]:
                if y1 >100 and y2 > 100 and abs(y1 - y2) > 10:
                    # if the line is actually on the ground (not noise)
                    # and is more than 10 pixels vertically, include it
                    lines_filtered.append((x1,y1,x2,y2))
       return lines_filtered
Project: WebAct    Author: CreatCodeBuild    | Project source | File source
def line_detect(im):
    '''
    Detect vertical and horizontal line segments in the binary image `im`.
    '''
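    # theta is the angle resolution of the Hough accumulator: a step of np.pi leaves a single
    # angle bin at 0 rad (near-vertical segments), while np.pi/2 adds a second bin at 90 degrees
    # that also picks up near-horizontal segments.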
    v_lines = cv2.HoughLinesP(im, 1, np.pi,
                              threshold=50,
                              minLineLength=50,
                              maxLineGap=0)

    h_lines = cv2.HoughLinesP(im, 1, np.pi/2,
                              threshold=100,
                              minLineLength=50,
                              maxLineGap=0)

    return v_lines, h_lines
Project: diy_driverless_car_ROS    Author: wilselby    | Project source | File source
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    return lines
Project: airport    Author: cfircohen    | Project source | File source
def FindInternalBox(bw):
  """Finds where the puzzle card is located.

  Detects all vertical and horizontal lines, and returns the largest
  contour that bounds them"""

  # Invert colors. HoughLines searches white lines on black background
  target = 255 - bw.copy()
  DebugShow(target)

  lines = cv2.HoughLinesP(target, 1, np.pi / 180, 100, 100, 10)
  if lines is None:
    logging.debug("HoughLinesP failed")
    return None

  logging.debug("Found {} lines using HoughLinesP".format(len(lines)))

  lines_image = np.zeros_like(target)
  for line in lines:
    for x1, y1, x2, y2 in line:
      if abs(x1 - x2) < 20:
        # vertical line
        x = min(x1, x2)
        cv2.line(lines_image, (x, y1), (x, y2), 255, 0)
      if abs(y1 - y2) < 20:
        y = min(y1, y2)
        cv2.line(lines_image, (x1, y), (x2, y), 255, 0)

  kernel = np.ones((5, 5), np.uint8)
  lines_image = cv2.dilate(lines_image, kernel, iterations=2)
  DebugShow(lines_image)

  return FindExternalContour(lines_image)
Project: CarLaneDetection    Author: leftthomas    | Project source | File source
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
                            maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    # draw_lines(line_img, lines)
    draw_lanes(line_img, lines)
    return line_img
Project: SDcarsLaneDetection    Author: Nazanin1369    | Project source | File source
def houghTransformAndRegionSelect(image, edges):
    rho = 1
    theta = np.pi/180
    threshold = 1
    min_line_length = 5
    max_line_gap = 3


    # Next we'll create a masked edges image using cv2.fillPoly()
    mask = np.zeros_like(edges)
    ignore_mask_color = 255

    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    vertices = np.array([[(0,imshape[0]),(450, 290), (490, 290), (imshape[1],imshape[0])]], dtype=np.int32)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    masked_edges = cv2.bitwise_and(edges, mask)

    line_image = np.copy(image)*0

    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)

    # Iterate over the output "lines" and draw lines on a blank image
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)

    # Create a "color" binary image to combine with line image
    color_edges = np.dstack((edges, edges, edges))

    # Draw the lines on the edge image
    lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)

    return lines_edges
Project: suiron    Author: kendricktan    | Project source | File source
def get_lane_lines(inframe):
    frame = inframe.copy()
    ret_frame = np.zeros(frame.shape, np.uint8)

    # We converted it into RGB when we normalized it
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

    gray = get_median_blur(gray)
    canny = get_canny(gray)

    # Hough lines
    # threshold = number of 'votes' before hough algorithm considers it a line
    lines = cv2.HoughLinesP(canny, 1, np.pi/180, threshold=25, minLineLength=40, maxLineGap=100)

    try:
        r = lines.shape[0]
    except AttributeError:
        r = 0

    for i in range(r):
        for x1, y1, x2, y2 in lines[i]:
            # Degrees as its easier for me to conceptualize
            angle = math.atan2(y1-y2, x1-x2)*180/np.pi

            # If it looks like a left or right lane
            # Draw it onto the new image
            if 100 < angle < 170 or -170 < angle < -100:
                cv2.line(ret_frame, (x1, y1), (x2, y2), (255, 255, 255), 10)

    return ret_frame
Project: pygta5    Author: Sentdex    | Project source | File source
def process_img(original_image):
    processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
    vertices = np.array([[10,500],[10,300], [300,200], [500,200], [800,300], [800,500]], np.int32)
    processed_img = roi(processed_img, [vertices])

    #                       edges
    lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, minLineLength=20, maxLineGap=15)
    draw_lines(processed_img,lines)
    return processed_img
Project: pygta5    Author: Sentdex    | Project source | File source
def process_img(image):
    original_image = image
    # edge detection
    processed_img =  cv2.Canny(image, threshold1 = 200, threshold2=300)

    processed_img = cv2.GaussianBlur(processed_img,(5,5),0)

    vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
                         ], np.int32)

    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    #                                      rho  theta    thresh
    lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, minLineLength=20, maxLineGap=15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1,m2 = draw_lanes(original_image,lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 30)
    except Exception as e:
        print(str(e))
        pass
    try:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 3)


            except Exception as e:
                print(str(e))
    except Exception as e:
        pass

    return processed_img,original_image, m1, m2
Project: pygta5    Author: Sentdex    | Project source | File source
def process_img(image):
    original_image = image
    # convert to gray
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # edge detection
    processed_img =  cv2.Canny(processed_img, threshold1 = 200, threshold2=300)

    processed_img = cv2.GaussianBlur(processed_img,(5,5),0)

    vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
                         ], np.int32)

    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    #                                      rho  theta    thresh
    lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, minLineLength=20, maxLineGap=15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1,m2 = draw_lanes(original_image,lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 30)
    except Exception as e:
        print(str(e))
        pass
    try:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 3)


            except Exception as e:
                print(str(e))
    except Exception as e:
        pass

    return processed_img,original_image, m1, m2
Project: pygta5    Author: Sentdex    | Project source | File source
def process_img(image):
    original_image = image
    # convert to gray
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # edge detection
    processed_img =  cv2.Canny(processed_img, threshold1 = 200, threshold2=300)

    processed_img = cv2.GaussianBlur(processed_img,(5,5),0)

    vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
                         ], np.int32)

    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    #                                      rho  theta    thresh
    lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, minLineLength=20, maxLineGap=15)
    try:
        l1, l2 = draw_lanes(original_image,lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 30)
    except Exception as e:
        print(str(e))
        pass
    try:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 3)


            except Exception as e:
                print(str(e))
    except Exception as e:
        pass

    return processed_img,original_image
Project: Self-Driving-Car-ND-Predict-Steering-Angle-with-CV    Author: sjamthe    | Project source | File source
def lane_lines(img, lines):
    """
    `lines` should be the output of a cv2.HoughLinesP.

    Returns left lane and right lane in a list.
    """
    global debug
    global image_int

    #if(lines is Null):
     #   print ("Error: No Houghlines detected for image ",image_cnt)

    if(lines is None):
        return None
Project: Self-Driving-Car-ND-Predict-Steering-Angle-with-CV    Author: sjamthe    | Project source | File source
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
    lines are drawn between y values of vertices_top and vertices_bottom

    Returns an image with hough lines drawn.
    """
    global angle

    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    new_lines = lane_lines(img, lines)

    #here we should store historic lane_lines together
    concat_lines = concat_hist_lines(new_lines)

    left_lane = lines_to_lane(img, concat_lines[0],'left')
    right_lane = lines_to_lane(img, concat_lines[1],'right')

    [left_lane,right_lane] = are_lanes_ok([left_lane,right_lane])
    angle = steering_angle(img, [left_lane,right_lane])


    #we need points for draw lanes array
    line_img = np.zeros(img.shape, dtype=np.uint8)

    if(left_lane is  None and right_lane is  None):
        lanes = None
    elif(left_lane is None):
        lanes = np.array([np.array([right_lane])])
    elif(right_lane is None):
        lanes = np.array([np.array([left_lane])])
    else:
        lanes = np.array([np.array([left_lane]),np.array([right_lane])])

    draw_lines(line_img, lanes,thickness=5)

    return line_img


Project: reconstruction    Author: microelly2    | Project source | File source
def execute_HoughLines(proxy,obj):
    ''' find houghlines '''

    # parameter from obj
    canny1=obj.canny1
    canny2=obj.canny2
    rho=obj.rho
    theta=obj.theta
    threshold=obj.threshold
    minLineLength =obj.minLineLength
    maxLineGap =obj.maxLineGap

    # load the image
    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    # find edges
    # the next two lines can probably be removed. #+#
    edges = cv2.Canny(img,canny1,canny2)
    obj.Proxy.img = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray,canny1,canny2)
    xsize=img.shape[1]
    ysize=img.shape[0]

    # find lines
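    # obj.theta appears to be given in degrees, hence the np.pi/180 conversion to a radian step.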
    lines = cv2.HoughLinesP(edges,1,np.pi/180*theta,threshold, minLineLength = minLineLength, maxLineGap = maxLineGap)

    k=0
    fclines=[]
    img = 0 *img

    for l in lines:
        k += 1
        [[x1,y1,x2,y2]] = l
        fl=tools.fcline(x1,-y1,x2,-y2)
        fclines.append(fl)       
        print (x1,y1,x2,y2)
        a=cv2.line(img,(x1,y1),(x2,y2),(0,255,0),1)

    # data for following nodes
    obj.Proxy.img=img
    obj.Proxy.fclines=fclines
    obj.Proxy.lines=lines

    # method for extra calculations
    obj.Proxy.__class__.linelengths=property(lambda self: linelengths(self))
    obj.Proxy.__class__.directions=property(lambda self: directions(self))
Project: reconstruction    Author: microelly2    | Project source | File source
def main(filename,canny1=100,canny2=200,rho=1,theta=1, threshold=10, minLineLength =25, maxLineGap =10,
            showimage=False,showimagewithlines=False,newDocument=True):
# def main(f):
    f=filename
    im = cv2.imread(f)
    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray,canny1,canny2)

    xsize=len(im[0])
    ysize=len(im)

#image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]])
    lines = cv2.HoughLinesP(edges,1,np.pi/180*theta,threshold, minLineLength = minLineLength, maxLineGap = maxLineGap)
#   lines = cv2.HoughLinesP(edges,1,np.pi/180,10, minLineLength = 25, maxLineGap = 10)
    #lines = cv2.HoughLinesP(edges,1,np.pi/2,2)[0]

    k=0
    fclines=[]

    for l in lines:
        k += 1
        [[x1,y1,x2,y2]] = l       
        fl=fclinev2(x1,-y1,x2,-y2)
        fclines.append(fl)
        #print (x1,y1,x2,y2)
        a=cv2.line(im,(x1,y1),(x2,y2),(0,255,255),2)

    c=Part.makeCompound(fclines)
    c.Placement.Base=FreeCAD.Vector(-xsize/2*scaler,ysize/2*scaler,0)
    if newDocument:
        d=App.newDocument("HoughLines")
#       App.setActiveDocument("Unnamed1")
#       App.ActiveDocument=d
#       Gui.ActiveDocument=Gui.getDocument("Unnamed1")

    Part.show(c)

    cv2.imwrite('/tmp/out.png',im)

    import Image, ImageGui
    #ImageGui.open(unicode("/tmp/out.png","utf-8"))

    if showimage:
        fimg=App.activeDocument().addObject('Image::ImagePlane','Image 2')
        fimg.Label=f
        fimg.ImageFile = f
        fimg.XSize = xsize*scaler
        fimg.YSize = ysize*scaler
        fimg.Placement.Base.z=-5

    if showimagewithlines:
        fimg=App.activeDocument().addObject('Image::ImagePlane','Image with Houghlines')
        fimg.ImageFile = '/tmp/out.png'
        fimg.XSize = xsize*scaler
        fimg.YSize = ysize*scaler
        fimg.Placement.Base.z=-10
        FreeCADGui.SendMsgToActiveView("ViewFit")

    print ("lines:",k)
Project: line-follower    Author: iseikr    | Project source | File source
def __findLine(self):
        self.__grabImage();

        if(self.currentImage is None):
            #grabbing image failed
            return -2.0

        #Convert to Grayscale
        img = cv2.cvtColor(self.currentImage, cv2.COLOR_BGR2GRAY)

        #Blur to reduce noise
        img = cv2.medianBlur(img,25)

        #Do Thresholding
        h,img = cv2.threshold(img, self.thresh, self.maxValue, cv2.THRESH_BINARY_INV)

        img = cv2.blur(img,(2,2))

        #Make image smaller
        img = cv2.resize(img, (self.horizontalRes, self.verticalRes))
        #org_img = cv2.resize(org_img, (self.horizontalRes, self.verticalRes))

        #Create skeleton
        size = np.size(img)
        skel = np.zeros(img.shape,np.uint8)
        element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
        done = False
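        # Morphological skeletonization: repeatedly erode the image and accumulate the pixels
        # removed at each step (the difference between the image and its opening).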
        while( not done):
            eroded = cv2.erode(img,element)
            temp = cv2.dilate(eroded,element)
            temp = cv2.subtract(img,temp)
            skel = cv2.bitwise_or(skel,temp)
            img = eroded.copy()
            zeros = size - cv2.countNonZero(img)
            if zeros==size:
                done = True

        #Do Line Detection
        lines = cv2.HoughLinesP(skel, 1, np.pi/180, 2,
                minLineLength=self.hough_minLineLength, maxLineGap=self.hough_maxLineGap)


        #get minimum and maximum x-coordinate from lines
        x_min = self.horizontalRes+1.0
        x_max = -1.0
        if lines is not None and len(lines[0]) > 0:
            for x1,y1,x2,y2 in lines[0]:
                x_min = min(x_min, x1, x2)
                x_max = max(x_max, x1, x2)
                #cv2.line(org_img,(x1,y1),(x2,y2),(0,255,0),2)

        #write output visualization
        #cv2.imwrite("output-img.png",org_img);

        #find the middle point x of the line and return
        #return -1.0 if no lines found
        if(x_max == -1.0 or x_min == (self.horizontalRes+1.0) ):
            return -1.0 #no line found
        else:
            return (x_min + x_max) / 2.0