Python cv2 module, COLOR_GRAY2BGR example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.COLOR_GRAY2BGR.
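
cv2.COLOR_GRAY2BGR replicates a single-channel image into three identical B, G, R channels, typically so that colored annotations (circles, lines, text) can be drawn on top of a grayscale base, as in most of the examples below. As a quick orientation, here is a minimal self-contained sketch (ours, not taken from any of the projects listed):

import cv2
import numpy as np

gray = np.zeros((100, 100), dtype=np.uint8)       # single-channel image
color = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)    # shape becomes (100, 100, 3)
cv2.circle(color, (50, 50), 20, (0, 0, 255), 2)   # red circle on the gray base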

Project: duo3d_ros    Author: ethz-ait    | Project source | File source
def extract_checkerboard_and_draw_corners(self, image, chbrd_size):
        image = CvBridge().imgmsg_to_cv2(image, 'mono8')
        image_color = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        ret, corners = cv2.findChessboardCorners(image_color, chbrd_size)

        if not ret:
            cv2.putText(image_color, 'Checkerboard not found', (0, self.res_height - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))

        cv2.drawChessboardCorners(image_color, chbrd_size, corners, ret)

        return ret, corners, image_color
Project: python-examples-cv    Author: tobybreckon    | Project source | File source
def draw_flow(img, flow, step=8):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis

#####################################################################

# define video capture object
Project: pyku    Author: dubvulture    | Project source | File source
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
Project: pybot    Author: spillai    | Project source | File source
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
Project: reconstruction    Author: microelly2    | Project source | File source
def execute_BlobDetector(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im=255-im
    im2 = img

    params = cv2.SimpleBlobDetector_Params()

    params.filterByArea = True
    params.minArea = obj.Area

    params.filterByConvexity = True
    params.minConvexity = obj.Convexity/200


    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    if not obj.showBlobs:
        im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        obj.Proxy.img = im_with_keypoints

        for k in keypoints:
            (x,y)=k.pt
            x=int(round(x))
            y=int(round(y))
#           cv2.circle(im,(x,y),4,0,5)
            cv2.circle(im,(x,y),4,255,5)
            cv2.circle(im,(x,y),4,0,5)
            im[y,x]=255
            im[y,x]=0
        obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)

    else:
        for k in keypoints:
            (x,y)=k.pt
            x=int(round(x))
            y=int(round(y))
            cv2.circle(im2,(x,y),4,(255,0,0),5)
            cv2.circle(im2,(x,y),4,(0,0,0),5)
            im2[y,x]=(255,0,0)
            im2[y,x]=(0,0,0)
        obj.Proxy.img = im2
Project: Yugioh-bot    Author: will7200    | Project source | File source
def read_captured_circles(self):
        img = cv2.cvtColor(self.query, cv2.COLOR_BGR2GRAY)
        img = cv2.medianBlur(img, 7)
        cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 30,
                                   param1=50, param2=30, minRadius=20, maxRadius=50)
        if circles is None:
            return
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            if i[1] < 400:
                continue
            self.circlePoints.append((i[0], i[1]))
        if self._debug:
            self.draw_circles(circles, cimg)
Project: conta-bolas    Author: ocarneiro    | Project source | File source
def play(self):
        key = 0
        j = self
        while key != 27 and key != 1048603:
            j.get_feed()
            self.display = self.image  # TODO copy to add interaction later

            key = cv2.waitKey(10)
            if key >= 0:
                # removes modifiers (NumLock, Caps, etc)
                key = key & 0xEFFFFF
                j.act_on_key(key)

            self.mask_image()
            self.draw_contours()

            cv2.imshow(self.window_name, self.display)

            if self.debug_mode:
                self.debug_image = cv2.cvtColor(self.mask, cv2.COLOR_GRAY2BGR)
                j.draw_sliders(self.debug_image)
                cv2.imshow(DEBUG_WINDOW, self.debug_image)
Project: crossingNet    Author: melonwan    | Project source | File source
def vis_pose(normed_vec):
    import depth 
    origin_pt = np.array([0,0,depth.DepthMap.invariant_depth])
    vec = normed_vec.copy()*50.0
    vec.shape = (-1,3)

    offset_x = Camera.center_x - depth.DepthMap.size2[0]/2
    offset_y = Camera.center_y - depth.DepthMap.size2[1]/2

    img = np.ones((depth.DepthMap.size2[0], depth.DepthMap.size2[1]))*255
    img = cv2.cvtColor(img.astype('uint8'), cv2.COLOR_GRAY2BGR)
    for idx, pt3 in enumerate(vec):
        pt = Camera.to2D(pt3+origin_pt)
        pt = (pt[0]-offset_x, pt[1]-offset_y)
        cv2.circle(img, (int(pt[0]), int(pt[1])),2, (255,0,0), -1)
    return img
Project: Two-Stream-Convolutional-Networks    Author: Yorwxue    | Project source | File source
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)  # sample a grid point every `step` pixels and reshape into a 2-row array
    fx, fy = flow[y, x].T  # flow components at the sampled grid points
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)  # stack start/end points into (N, 2, 2) line segments
    lines = np.int32(lines + 0.5)  # round to integer pixel coordinates
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))  # draw each flow vector as a green segment
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)  # mark the start point of each segment
    return vis
Project: pynephoscope    Author: neXyon    | Project source | File source
def renderStarGauss(image, cov, mu, first, scale = 5):
    num_circles = 3
    num_points = 64

    cov = sqrtm(cov)

    num = num_circles * num_points
    pos = np.ones((num, 2))

    for c in range(num_circles):
        r = c + 1
        for p in range(num_points):
            angle = p / num_points * 2 * np.pi
            index = c * num_points + p

            x = r * np.cos(angle)
            y = r * np.sin(angle)

            pos[index, 0] = x * cov[0, 0] + y * cov[0, 1] + mu[0]
            pos[index, 1] = x * cov[1, 0] + y * cov[1, 1] + mu[1]

    #image = image.copy()
    #image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    if first:
        image = cv2.resize(image, (0, 0), None, scale, scale, cv2.INTER_NEAREST)

    for c in range(num_circles):
        pts = np.array(pos[c * num_points:(c + 1) * num_points, :] * scale + scale / 2, np.int32)
        pts = pts.reshape((-1,1,2))
        cv2.polylines(image, [pts], True, (255, 0, 0))

    return image
Project: pynephoscope    Author: neXyon    | Project source | File source
def render(self):
        image = self.calibrator.image.copy()
        sky = cv2.cvtColor(self.renderer.image.copy(), cv2.COLOR_GRAY2BGR)

        correspondences = self.calibrator.getCurrentCorrespondences()

        for correspondence in correspondences:
            if correspondence.pos is not None:
                cv2.circle(image, correspondence.pos, self.circle_radius, self.marked_color)
            if correspondence.altaz is not None:
                self.renderer.highlightStar(sky, correspondence.altaz, self.circle_radius, self.marked_color)

        if self.selected_star is not None:
            if correspondences[self.selected_star].pos is not None:
                cv2.circle(image, correspondences[self.selected_star].pos, self.circle_radius, self.selected_color)
            if correspondences[self.selected_star].altaz is not None:
                self.renderer.highlightStar(sky, correspondences[self.selected_star].altaz, self.circle_radius, self.selected_color)

        cv2.imshow(self.image_window, image)
        cv2.imshow(self.sky_window, sky)
Project: Farmbot_GeneralAP    Author: SpongeYao    | Project source | File source
def findContours(arg_img,arg_canvas, arg_MinMaxArea=False, arg_debug= False):
    image= arg_img.copy()
    #print image
    canvas= arg_canvas.copy()
    if len(image.shape)==3:
        # findContours needs a single-channel input, so collapse a color image to gray
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if sys.version_info.major == 2: 
        ctrs, hier = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, ctrs, hier = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if arg_MinMaxArea is not False:
        ctrs = list(filter(lambda x: arg_MinMaxArea[1] > cv2.contourArea(x) > arg_MinMaxArea[0], ctrs))

    print('>>> ', len(ctrs))
    for ctr in ctrs:
        print('Area: ', cv2.contourArea(ctr))
        cv2.drawContours(canvas, [ctr], 0, (0, 128, 255), 3)
    if arg_debug:
        cv2.imwrite('Debug/debug_findContours.jpg',canvas)
    return canvas
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def visulize_matches(matches, k2, k1, img2, img1):
    """ Visualize SIFT keypoint matches."""

    import scipy as sp
    img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
    view[:h1, :w1, :] = img1  
    view[:h2, w1:, :] = img2
    view[:, :, 1] = view[:, :, 0]  
    view[:, :, 2] = view[:, :, 0]

    for m in matches:
        m = m[0]
        # draw the keypoints
        # print m.queryIdx, m.trainIdx, m.distance
        color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])
        pt1 = (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1]))
        pt2 = (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1]))

        cv.line(view, pt1, pt2, color)
    return view
Project: ppap_detect    Author: ashitani    | Project source | File source
def set_obj(bg,fg, pos):
    ret=bg.copy()
    mask = fg[:,:,3]
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    mask = mask/ 255.0
    fg_rgb=fg[:,:,:3]
    fg_rgb=fg_rgb/255.0
    ret=ret/255.0

    x,y = pos
    siz=np.shape(fg_rgb)

    ret[y:(y+siz[0]),x:(x+siz[1]),:]*= (1-mask)
    ret[y:(y+siz[0]),x:(x+siz[1]),:]+= fg_rgb
    #ret*=255
    #ret=np.clip(ret,0,255)
    #ret=ret.astype(np.uint8)
    return ret
Project: unrolled-GAN    Author: Zardinality    | Project source | File source
def main():
    with tf.device('/cpu:0'):
        z, train = build_graph()
    if FLAGS.ckpt_dir is not None:
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
            batch_z = np.random.normal(0, 1.0, [FLAGS.batch_size, FLAGS.z_dim]) \
                .astype(np.float32)
            rs = train.eval(feed_dict={z:batch_z})
    print(rs[0].shape)
    overall = []
    for i in range(8):
        temp = []
        for j in range(8):
            temp.append(rs[i * 8 + j])

        overall.append(np.concatenate(temp, axis=1))
    res = np.concatenate(overall, axis=0)
    res = cv2.cvtColor((res)*255, cv2.COLOR_GRAY2BGR)
    cv2.imwrite('sample.png', res)
Project: unrolled-GAN    Author: Zardinality    | Project source | File source
def main():
    with tf.device('/cpu:0'):
        z, label, train = build_graph()
    temp = np.repeat(np.arange(10), 8)
    lb = np.zeros((80, 10))
    lb[np.arange(80), temp] = 1
    if FLAGS.ckpt_dir is not None:
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
            batch_z = np.random.normal(0, 1.0, [FLAGS.batch_size, FLAGS.z_dim]) \
                .astype(np.float32)
            rs = train.eval(feed_dict={z:batch_z, label:lb})
    print(rs[0].shape)
    overall = []
    for i in range(10):
        temp = []
        for j in range(8):
            temp.append(rs[i * 8 + j])

        overall.append(np.concatenate(temp, axis=1))
    res = np.concatenate(overall, axis=0)
    res = cv2.cvtColor((res)*255, cv2.COLOR_GRAY2BGR)
    cv2.imwrite('sample.png', res)
Project: pyku    Author: dubvulture    | Project source | File source
def save_hough(self, lines, clmap):
        """
        :param lines: (rho, theta) pairs
        :param clmap: clusters assigned to lines
        :return: None
        """
        height, width = self.image.shape
        ratio = 600. * (self.step+1) / min(height, width)
        temp = cv2.resize(self.image, None, fx=ratio, fy=ratio,
                          interpolation=cv2.INTER_CUBIC)
        temp = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
        colors = [(0, 127, 255), (255, 0, 127)]

        for i in range(0, np.size(lines) // 2):
            rho = lines[i, 0]
            theta = lines[i, 1]
            color = colors[clmap[i, 0]]
            if theta < np.pi / 4 or theta > 3 * np.pi / 4:
                pt1 = (rho / np.cos(theta), 0)
                pt2 = (rho - height * np.sin(theta) / np.cos(theta), height)
            else:
                pt1 = (0, rho / np.sin(theta))
                pt2 = (width, (rho - width * np.cos(theta)) / np.sin(theta))
            pt1 = (int(pt1[0]), int(pt1[1]))
            pt2 = (int(pt2[0]), int(pt2[1]))
            cv2.line(temp, pt1, pt2, color, 5)

        self.save2image(temp)
Project: pybot    Author: spillai    | Project source | File source
def publish_image_t(pub_channel, im, jpeg=False, flip_rb=True): 
    global g_viz_pub
    out = image_t()

    # Populate appropriate fields
    h,w = im.shape[:2]
    c = 3
    out.width, out.height = w, h
    out.row_stride = w*c
    out.utime = 1

    # Propagate encoded/raw data, 
    image = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) if im.ndim == 2 else im
    if flip_rb and im.ndim == 3: 
        rarr, barr = image[:,:,2].copy(), image[:,:,0].copy()
        image[:,:,0], image[:,:,2] = rarr, barr

    # Propagate appropriate encoding 
    if jpeg: 
        out.pixelformat = image_t.PIXEL_FORMAT_MJPEG
    else: 
        out.pixelformat = image_t.PIXEL_FORMAT_RGB

    out.data = cv2.imencode('.jpg', image)[1] if jpeg else image.tostring()
    out.size = len(out.data)
    out.nmetadata = 0

    # Pub
    g_viz_pub.lc.publish(pub_channel, out.encode())
Project: pybot    Author: spillai    | Project source | File source
def to_color(im, flip_rb=False): 
    if im.ndim == 2: 
        return cv2.cvtColor(im, cv2.COLOR_GRAY2RGB if flip_rb else cv2.COLOR_GRAY2BGR)
    else: 
        return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) if flip_rb else im.copy()
Project: demos    Author: jnez71    | Project source | File source
def cdisp(name, F, V, auto=True):
    mag = npl.norm(F, axis=0)
    ang = np.degrees(np.arctan2(F[1], F[0]) + np.pi)
    if auto: val = 255*mag/np.max(mag)
    else: val = 10000*mag
    img = cv2.cvtColor(np.uint8(np.dstack((ang/2, 255*np.ones_like(mag), val))), cv2.COLOR_HSV2BGR)
    img = img + cv2.cvtColor(np.uint8((255/np.max(V))*V), cv2.COLOR_GRAY2BGR)
    img = cv2.resize(np.clip(img, 0, 255), imshow_size)
    cv2.imshow(name, img)
    return img

# Recording tools
Project: reconstruction    Author: microelly2    | Project source | File source
def execute_Skeleton(proxy,obj):

    from skimage.morphology import medial_axis

    threshold=0.1*obj.threshold

    try: 
        img2=obj.sourceObject.Proxy.img
        img=img2.copy()
    except: 
        sayexc()
        img=cv2.imread(__dir__+'/icons/freek.png')

    data = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Compute the medial axis (skeleton) and the distance transform
    skel, distance = medial_axis(data, return_distance=True)

    # Distance to the background for pixels of the skeleton
    dist_on_skel = distance * skel

    # entferne ganz duenne linien
    dist_on_skelw =(dist_on_skel >= threshold)* distance

    say("size of the image ...")
    say(dist_on_skelw.shape)
#   skel = np.array(dist_on_skelw,np.uint8) 
    skel = np.array(dist_on_skelw *255/np.max(dist_on_skelw),np.uint8) 
    obj.Proxy.img=cv2.cvtColor(skel*100, cv2.COLOR_GRAY2BGR)
    obj.Proxy.dist_on_skel=dist_on_skelw
Project: camera_calibration_frontend    Author: groundmelon    | Project source | File source
def callback(self, msg):
        #convert image to opencv
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg)
            np_image= np.array(cv_image)
        except CvBridgeError, e:
            print "Could not convert ros message to opencv image: ", e
            return

        #calculate the fft magnitude
        img_float32 = np.float32(np_image)
        dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
        dft_shift = np.fft.fftshift(dft)
        magnitude_spectrum = cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])

        #normalize
        magnitude_spectrum_normalized = magnitude_spectrum / np.sum(magnitude_spectrum)

        #frequency domain entropy (-> Entropy Based Measure of Camera Focus. Matej Kristan, Franjo Pernu. University of Ljubljana. Slovenia)
        fde = np.sum( magnitude_spectrum_normalized * np.log(magnitude_spectrum_normalized) )

        y = 20; x = 20
        text = "fde: {0}   (minimize this for focus)".format(np.sum(fde))
        np_image = cv2.cvtColor(np_image, cv2.COLOR_GRAY2BGR)
        cv2.putText(np_image, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 255), thickness=2)
        cv2.imshow(self.windowNameOrig, np_image)
        cv2.waitKey(10)
Project: Yugioh-bot    Author: will7200    | Project source | File source
def capture_white_circles(self):
        self.prep_for_white_circles()
        img = cv2.cvtColor(self.white_query, cv2.COLOR_BGR2GRAY)
        cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 40,
                                   param1=50, param2=30, minRadius=5, maxRadius=60)
        if circles is None:
            return
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            self.circlePoints.append((i[0], i[1]))
        if self._debug:
            self.draw_circles(circles, cimg)
Project: segmentation-visualization-training    Author: tkwoo    | Project source | File source
def predict_image(flag):
    t_start = cv2.getTickCount()
    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    with open(os.path.join(flag.ckpt_dir, flag.ckpt_name, 'model.json'), 'r') as json_file:
            loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    weight_list = sorted(glob(os.path.join(flag.ckpt_dir, flag.ckpt_name, "weight*")))
    model.load_weights(weight_list[-1])
    print "[*] model load : %s"%weight_list[-1]
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000 
    print "[*] model loading Time: %.3f ms"%t_total

    imgInput = cv2.imread(flag.test_image_path, 0)
    input_data = imgInput.reshape((1,256,256,1))

    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print "Predict Time: %.3f ms"%t_total

    imgMask = (result[0]*255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    _, imgMask = cv2.threshold(imgMask, int(255*flag.confidence_value), 255, cv2.THRESH_BINARY)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    # imgZero = np.zeros((256,256), np.uint8)
    # imgMaskColor = cv2.merge((imgZero, imgMask, imgMask))
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.3, 0.0)
    output_path = os.path.join(flag.output_dir, os.path.basename(flag.test_image_path))
    cv2.imwrite(output_path, imgShow)
    print "SAVE:[%s]"%output_path
Project: segmentation-visualization-training    Author: tkwoo    | Project source | File source
def train_visualization_seg(self, model, epoch):
        image_name_list = sorted(glob(os.path.join(self.flag.data_path,'train/IMAGE/*/*.png')))
        print image_name_list

        image_name = image_name_list[-1]
        image_size = self.flag.image_size

        imgInput = cv2.imread(image_name, self.flag.color_mode)
        output_path = self.flag.output_dir
        input_data = imgInput.reshape((1,image_size,image_size,self.flag.color_mode*2+1))

        t_start = cv2.getTickCount()
        result = model.predict(input_data, 1)
        t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
        print "[*] Predict Time: %.3f ms"%t_total

        imgMask = (result[0]*255).astype(np.uint8)
        imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
        imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
        imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.4, 0.0)
        output_path = os.path.join(self.flag.output_dir, '%04d_'%epoch+os.path.basename(image_name))
        cv2.imwrite(output_path, imgShow)
        # print "SAVE:[%s]"%output_path
        # cv2.imwrite(os.path.join(output_path, 'img%04d.png'%epoch), imgShow)
        # cv2.namedWindow("show", 0)
        # cv2.resizeWindow("show", 800, 800)
        # cv2.imshow("show", imgShow)
        # cv2.waitKey(1)
Project: DrosophilaCooperative    Author: avaccari    | Project source | File source
def processFrame(self):
        # If we are enhancing the image
        if self.enhance:
            # Frangi vesselness to highlight tubuar structures
            gray = cv2.cvtColor(self.sourceFrame, cv2.COLOR_BGR2GRAY)
            tub = tubes(gray, [5, 12])
            tubular = cv2.cvtColor(tub, cv2.COLOR_GRAY2BGR)

            # Merge with original to ennhance tubular structures
            high = 0.3
            rest = 1.0 - high
            colorized = cv2.addWeighted(self.sourceFrame, rest, tubular, high, 0.0)
    #        colorized = cv2.add(self.sourceFrame, tubular)

            # Tile horizontally
            self.processedFrame = np.concatenate((self.sourceFrame,
                                                  tubular,
                                                  colorized),
                                                 axis=1)
        else:
            self.processedFrame = self.sourceFrame

        self.workingFrame = self.processedFrame.copy()

        # If we are tracking, track and show analysis
        if self.tracking is True:
            self.trackObjects()
            self.showBehavior()
Project: kaggle-carvana    Author: ematvey    | Project source | File source
def mask_to_bgr(mask, B, G, R):
    return cv2.cvtColor((mask / mask.max()).astype(np.uint8), cv2.COLOR_GRAY2BGR) * np.array([B, G, R], dtype=np.uint8)
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def build_mask(self, image):
        """ Build the mask to find the path edges """
        kernel = np.ones((3, 3), np.uint8)
        img = cv2.bilateralFilter(image, 9, 75, 75)
        img = cv2.erode(img, kernel, iterations=1)

        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, self.lower_gray, self.upper_gray)

        mask2 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        mask2 = cv2.erode(mask2, kernel)
        mask2 = cv2.dilate(mask2, kernel, iterations=1)

        return mask2
Project: imgProcessor    Author: radjkarl    | Project source | File source
def drawChessboard(self, img=None):
        '''
        Draw a grid fitting to the last added image,
        either on that image or on another:
        img is None  -> draw the chessboard on the last added image
        img is False -> draw the chessboard on an empty image
        otherwise    -> draw the chessboard on the given image
        '''
        assert self.findCount > 0, 'cannot draw chessboard if nothing found'
        if img is None:
            img = self.img
        elif isinstance(img, bool) and not img:
            img = np.zeros(shape=(self.img.shape), dtype=self.img.dtype)
        else:
            img = imread(img, dtype='uint8')
        gray = False
        if img.ndim == 2:
            gray = True
            # need a color 8 bit image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        # Draw and display the corners
        cv2.drawChessboardCorners(img, self.opts['size'],
                                  self.opts['imgPoints'][-1],
                                  self.opts['foundPattern'][-1])
        if gray:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return img
Project: crossingNet    Author: melonwan    | Project source | File source
def visPair(self, depth, pose=None, trans=None, com=None, ratio=None):
        img = depth[0].copy()
        img = (img+1)*127.0
        img = cv2.cvtColor(img.astype('uint8'), cv2.COLOR_GRAY2BGR)
        if pose is None:
            return img

        skel = pose.copy()
        skel.shape = (-1, 3)
        skel = skel*ratio
        skel2 = []
        for pt in skel:
            pt2 = Camera.to2D(pt+com)
            pt2[2] = 1.0
            pt2 = np.dot(trans, pt2)
            pt2.shape = (3,1)
            pt2 = (pt2[0],pt2[1])
            skel2.append(pt2)
        for idx, pt2 in enumerate(skel2):
            cv2.circle(img, pt2, 3, 
                       data.util.figColor[colorPlatte[idx]], -1)
        for b in bones:
            pt1 = skel2[b[0]]
            pt2 = skel2[b[1]]
            color = b[2]
            cv2.line(img,pt1,pt2,color,2)
        return img
Project: crossingNet    Author: melonwan    | Project source | File source
def visualizeCrop(self, norm_skel = None):
        img = self.norm_dm.copy()
        img = (img+0.5)*255.0
        colorImg = cv2.cvtColor(img.astype('uint8'), cv2.COLOR_GRAY2BGR)
        if norm_skel is None:
            return colorImg

        self.setNormSkel(norm_skel)
        skel2D = self.crop2D()
        for pt in skel2D:
            cv2.circle(colorImg, (pt[0], pt[1]), 2, (0,0,255), -1)
        return colorImg
Project: crossingNet    Author: melonwan    | Project source | File source
def visualizeFull(self, norm_skel = None):
        img = self.dm.copy()
        img[img >= Camera.far_point] = 0
        img = img*(256/img.max())
        colorImg = cv2.cvtColor(img.astype('uint8'), cv2.COLOR_GRAY2BGR)
        if norm_skel is None:
            return colorImg

        self.setNormSkel(norm_skel)
        skel2D = self.full2D()
        for pt in skel2D:
            cv2.circle(colorImg, (pt[0], pt[1]), 5, (0,0,255), -1)
        return colorImg
Project: crossingNet    Author: melonwan    | Project source | File source
def vis_normed_pose(normed_vec, img=None):
    import depth
    pt2 = projectNormPose3D(normed_vec)

    if not type(img) is np.ndarray:
        img = np.ones((depth.DepthMap.size2[0], depth.DepthMap.size2[1]))*255

    img = img.reshape(depth.DepthMap.size2[0], depth.DepthMap.size2[1])
    img = cv2.cvtColor(img.astype('uint8'), cv2.COLOR_GRAY2BGR)
    for idx, pt in enumerate(pt2):
        cv2.circle(img, (int(pt[0]), int(pt[1])), 3, (0,0,255), -1)
    return img
Project: trackingtermites    Author: dmrib    | Project source | File source
def apply_filters(self, frame):
        """Apply specified filters to frame.

        Args:
            frame (np.ndarray): frame to be modified.
        Returns:
            n_frame (np.ndarray): modified frame.
        """
        n_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if 'g-blur' in self.filters:
            n_frame = cv2.GaussianBlur(n_frame, (5,5), 0)
        if 'b-filtering' in self.filters:
            n_frame = cv2.bilateralFilter(n_frame, 9, 75, 75)
        if 't_adaptive' in self.filters:
            n_frame = cv2.adaptiveThreshold(n_frame, 255,
                                            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv2.THRESH_BINARY, 115, 1)
        if 'otsu' in self.filters:
            _, n_frame = cv2.threshold(n_frame, 125, 255,
                                       cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        if 'canny' in self.filters:
            n_frame = cv2.Canny(n_frame, 100, 200)
        if 'b-subtraction' in self.filters:
            n_frame = self.subtractor.apply(frame)

        n_frame = cv2.cvtColor(n_frame, cv2.COLOR_GRAY2BGR)

        return n_frame
Project: basicCV    Author: chenminhua    | Project source | File source
def draw_flow(im, flow, step=16):
  h, w = im.shape[:2]
  y, x = mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
  fx, fy = flow[y, x].T

  lines = vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
  lines = int32(lines)

  vis = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
  for (x1, y1), (x2, y2) in lines:
    cv2.line(vis, (x1, y1), (x2, y2), (0, 255, 0), 1)
    cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
  return vis
Project: Smart-Car    Author: jimchenhub    | Project source | File source
def drawlines(img1,img2,lines,pts1,pts2):
    ''' img1 - image on which we draw the epilines for the points in img2
    lines - corresponding epilines '''
    r,c = img1.shape
    img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
    for r,pt1,pt2 in zip(lines,pts1,pts2):
        color = tuple(np.random.randint(0,255,3).tolist())
        x0,y0 = map(int, [0, -r[2]/r[1] ])
        x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
        img1 = cv2.line(img1, (x0,y0), (x1,y1), color,1)
        img1 = cv2.circle(img1,tuple(pt1),5,color,-1)
        img2 = cv2.circle(img2,tuple(pt2),5,color,-1)
    return img1,img2
Project: pynephoscope    Author: neXyon    | Project source | File source
def renderCalibrationResult(self):
        num_circles = 9
        num_points = 64
        num = num_circles * num_points
        altaz = np.ones((num, 2))

        for c in range(num_circles):
            alt = c / 18 * np.pi
            for p in range(num_points):
                az = p / num_points * 2 * np.pi
                index = c * num_points + p
                altaz[index, 0] = alt
                altaz[index, 1] = az

        pos = self.calibrator.transform(altaz)
        inpos = self.renderer.altazToPos(altaz)

        image = self.calibrator.image.copy()
        sky = cv2.cvtColor(self.renderer.image.copy(), cv2.COLOR_GRAY2BGR)

        for c in range(num_circles):
            pts = np.array(pos[c * num_points:(c + 1) * num_points, :], np.int32)
            pts = pts.reshape((-1,1,2))
            cv2.polylines(image, [pts], True, (255, 0, 0))

            pts = np.array(inpos[c * num_points:(c + 1) * num_points, 0:2], np.int32)
            pts = pts.reshape((-1,1,2))
            cv2.polylines(sky, [pts], True, (255, 0, 0))

        correspondences = self.calibrator.getCurrentCorrespondences()

        for correspondence in correspondences:
            if correspondence.pos is not None:
                cv2.circle(image, correspondence.pos, self.circle_radius, self.selected_color)
            if correspondence.altaz is not None:
                altaz = correspondence.altaz

                pos = np.array(self.calibrator.transform(altaz), np.int32)[0] # np.array([np.array([a.radian for a in altaz])]) TODO
                pos = (pos[0], pos[1])

                cv2.circle(image, pos, self.circle_radius, self.marked_color)
                self.renderer.highlightStar(sky, correspondence.altaz, self.circle_radius, self.marked_color)

        cv2.imshow(self.image_window, image)
        cv2.imshow(self.sky_window, sky)
Project: pynephoscope    Author: neXyon    | Project source | File source
def __init__(self):
        self.mask = cv2.cvtColor(SkyCamera.getBitMask(), cv2.COLOR_GRAY2BGR)
Project: cv-utils    Author: gmichaeljaison    | Project source | File source
def gray3ch(img):
    return cv.cvtColor(img, cv.COLOR_GRAY2BGR) if is_gray(img) else img
Project: ppap_detect    Author: ashitani    | Project source | File source
def set_obj_(bg,fg, pos):
    ret=bg.copy()
    mask = fg[:,:,3]
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    mask = mask/ 255.0
    fg_rgb=fg[:,:,:3]
    fg_rgb=fg_rgb/255.0
    ret=ret/255.0

    x,y = pos
    siz=np.shape(fg_rgb)

    ret[y:(y+siz[0]),x:(x+siz[1]),:]*= (1-mask)
    ret[y:(y+siz[0]),x:(x+siz[1]),:]+= fg_rgb
    return ret
Project: Robo-Plot    Author: JackBuck    | Project source | File source
def _log_contours_on_current_image(self, contours, name: str) -> None:
        """
        Args:
            contours (list[np.ndarray]):
        """
        img = cv2.cvtColor(self._img.copy(), cv2.COLOR_GRAY2BGR)
        cv2.drawContours(img, contours, contourIdx=-1, color=(0, 0, 255), thickness=1)
        self.intermediate_images.append(NamedImage(img, name))
Project: Robo-Plot    Author: JackBuck    | Project source | File source
def draw_image_with_contours(img: np.ndarray, contours, window_title: str = "Image with contours") -> None:
    img_colour = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(img_colour, contours, contourIdx=-1, color=(0, 0, 255), thickness=1)
    draw_image(img_colour, window_title)
Project: Robo-Plot    Author: JackBuck    | Project source | File source
def create_debug_image(image):
    debug_image = cv2.resize(image, (0, 0), fx=DEBUG_SCALE_FACTOR, fy=DEBUG_SCALE_FACTOR)
    debug_image = cv2.cvtColor(debug_image, cv2.COLOR_GRAY2BGR)
    return debug_image
Project: vin-keras    Author: neka-nat    | Project source | File source
def main():
    parser = argparse.ArgumentParser(description='VIN')
    parser.add_argument('--data', '-d', type=str, default='./data/map_data.pkl',
                        help='Path to map data generated with script_make_data.py')
    parser.add_argument('--model', '-m', type=str, default='vin_model_weights.h5',
                        help='Model from given file')
    args = parser.parse_args()

    k = 20
    train, test = process_map_data(args.data)
    model = vin_model(l_s=test[0].shape[2], k=k)
    model.load_weights(args.model)

    for d in zip(*test):
        im = d[0]
        pos = d[1]
        action, reward, value = predict(im, pos, model, k)

        path = [tuple(pos)]
        for _ in range(30):
            if im[1][pos[1], pos[0]] == 1:
                break
            action, _, _ = predict(im, pos, model, k)
            dx, dy = get_action(action)
            pos[0] = pos[0] + dx
            pos[1] = pos[1] + dy
            path.append(tuple(pos))

        test_img = cv2.cvtColor(im[0], cv2.COLOR_GRAY2BGR)
        goal = find_goal(im[1])

        for s in path:
            cv2.rectangle(test_img, (s[0], s[1]), (s[0], s[1]), (1, 0, 0), -1)
        cv2.rectangle(test_img, (path[0][0], path[0][1]), (path[0][0], path[0][1]), (0, 1, 1), -1)
        cv2.rectangle(test_img, (goal[0], goal[1]), (goal[0], goal[1]), (0, 0, 1), -1)
        cv2.imshow("image", cv2.resize(255 - test_img * 255, (300, 300), interpolation=cv2.INTER_NEAREST))
        cv2.imshow("reward", cv2.resize(reward, (300, 300), interpolation=cv2.INTER_NEAREST))
        cv2.imshow("value", cv2.resize(value / 80, (300, 300), interpolation=cv2.INTER_NEAREST))
        cv2.waitKey(0)
Project: deep-prior    Author: moberweger    | Project source | File source
def show(self, frame, pose):
        """
        Show depth with overlayed joints
        :param frame: depth frame
        :param pose: joint positions
        :return: image
        """

        # plot depth image with annotations
        imgcopy = frame.copy()
        # display hack to hide nd depth
        msk = numpy.logical_and(32001 > imgcopy, imgcopy > 0)
        msk2 = numpy.logical_or(imgcopy == 0, imgcopy == 32001)
        min = imgcopy[msk].min()
        max = imgcopy[msk].max()
        imgcopy = (imgcopy - min) / (max - min) * 255.
        imgcopy[msk2] = 255.
        imgcopy = imgcopy.astype('uint8')
        imgcopy = cv2.cvtColor(imgcopy, cv2.COLOR_GRAY2BGR)

        jtI = self.importer.joints3DToImg(pose)
        for i in range(jtI.shape[0]):
            cv2.circle(imgcopy, (jtI[i, 0], jtI[i, 1]), 3, (255, 0, 0), -1)

        import matplotlib
        if pose.shape[0] == 16:
            jointConnections = [[0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8], [8, 9], [0, 10],
                                 [10, 11], [11, 12], [0, 13], [13, 14], [14, 15]]
            jointConnectionColors = [matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 1]]]))[0, 0],
                                      matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 1]]]))[0, 0],
                                      matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 1]]]))[0, 0],
                                      matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 1]]]))[0, 0],
                                      matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 1]]]))[0, 0]]
        elif pose.shape[0] == 14:
            jointConnections = [[13, 1], [1, 0], [13, 3], [3, 2], [13, 5], [5, 4], [13, 7], [7, 6], [13, 10],
                                     [10, 9], [9, 8], [13, 11], [13, 12]]
            jointConnectionColors = [matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.00, 1, 1]]]))[0, 0],
                                          matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.33, 1, 1]]]))[0, 0],
                                          matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.50, 1, 1]]]))[0, 0],
                                          matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.66, 1, 1]]]))[0, 0],
                                          matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.6]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 0.8]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.83, 1, 1]]]))[0, 0],
                                          matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.16, 1, 0.7]]]))[0, 0], matplotlib.colors.hsv_to_rgb(numpy.asarray([[[0.16, 1, 1]]]))[0, 0]]
        else:
            raise ValueError("Invalid number of joints")

        for i in range(len(jointConnections)):
            cv2.line(imgcopy, (jtI[jointConnections[i][0], 0], jtI[jointConnections[i][0], 1]),
                     (jtI[jointConnections[i][1], 0], jtI[jointConnections[i][1], 1]), 255.*jointConnectionColors[i], 2)

        return imgcopy
Project: esys-pbi    Author: fsxfreak    | Project source | File source
def update(self,frame,events):
        for eye_index in self.showeyes:
            requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]

            #1. do we need a new frame?
            if requested_eye_frame_idx != self.eye_frames[eye_index].index:
                # do we need to seek?
                if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index()+1:
                    # if we just need to seek by one frame, it's faster to just read one and throw it away.
                    _ = self.eye_cap[eye_index].get_frame()
                if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
                    # only now do I need to seek
                    self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
                # reading the new eye frame frame
                try:
                    self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
                except EndofVideoFileError:
                    logger.warning("Reached the end of the eye video for eye video {}.".format(eye_index))
            else:
                #our old frame is still valid because we are doing upsampling
                pass

            #2. dragging image
            if self.drag_offset[eye_index] is not None:
                pos = glfwGetCursorPos(glfwGetCurrentContext())
                pos = normalize(pos,glfwGetWindowSize(glfwGetCurrentContext()))
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
                self.pos[eye_index][0] = pos[0]+self.drag_offset[eye_index][0]
                self.pos[eye_index][1] = pos[1]+self.drag_offset[eye_index][1]
            else:
                self.video_size = [round(self.eye_frames[eye_index].width*self.eye_scale_factor), round(self.eye_frames[eye_index].height*self.eye_scale_factor)]

            #3. keep in image bounds, do this even when not dragging because the image video_sizes could change.
            self.pos[eye_index][1] = min(frame.img.shape[0]-self.video_size[1],max(self.pos[eye_index][1],0)) #frame.img.shape[0] is height, frame.img.shape[1] is width of screen
            self.pos[eye_index][0] = min(frame.img.shape[1]-self.video_size[0],max(self.pos[eye_index][0],0))

            #4. flipping images, converting to greyscale
            eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img,cv2.COLOR_BGR2GRAY) #auto gray scaling
            eyeimage = cv2.resize(eye_gray,(0,0),fx=self.eye_scale_factor, fy=self.eye_scale_factor)
            if self.mirror[str(eye_index)]:
                eyeimage = np.fliplr(eyeimage)
            if self.flip[str(eye_index)]:
                eyeimage = np.flipud(eyeimage)

            #5. finally overlay the image
            x,y = int(self.pos[eye_index][0]),int(self.pos[eye_index][1])
            transparent_image_overlay((x,y),cv2.cvtColor(eyeimage,cv2.COLOR_GRAY2BGR),frame.img,self.alpha)
Project: BirdCLEF2017    Author: kahst    | Project source | File source
def openImage(path, useCache=USE_CACHE):

    global CACHE

    #using a dict {path:image} cache saves some time after first epoch
    #but may consume a lot of RAM
    if path in CACHE:
        return CACHE[path]
    else:

        #open image
        img = cv2.imread(path)

        #DEBUG
        try:
            h, w = img.shape[:2]
        except:
            print "IMAGE NONE-TYPE:", path

        #original image dimensions
        try:
            h, w, d = img.shape

            #to gray?
            if IM_DIM == 1:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        except:
            h, w = img.shape

            #to color?
            if IM_DIM == 3:
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        #resize to conv input size
        img = cv2.resize(img, (IM_SIZE[0], IM_SIZE[1]))

        #convert to floats between 0 and 1
        img = np.asarray(img / 255., dtype='float32')  

        if useCache:
            CACHE[path] = img
        return img
Project: pool-assist    Author: sahirv    | Project source | File source
def findCircles():
    # read image - 0 is greyscale, 1 - color
    table_img = cv2.imread('training_sets/tables/extable6.png', 1)
    table_img_col = table_img.copy()
    table_img_grey = cv2.cvtColor(table_img, cv2.COLOR_BGR2GRAY)
    table_orig = table_img_grey.copy()

    # smooth
    table_img_grey = cv2.blur(table_img_grey, (3,3))

    # perform canny edge detection
    table_canny = cv2.Canny(table_img_grey, 15, 30)
    t_c_copy = table_canny.copy()

    # Perform Hough circle transform
    circles = cv2.HoughCircles(table_canny, cv2.HOUGH_GRADIENT, 1, 25, param1=90, param2=30, maxRadius=50, minRadius=14)

    avgObjRadius = 0
    stripes = []
    solids = []
    cueBall = (0,0)
    pockets = []
    if circles is not None:
        print("Found circles")
        circles = np.round(circles[0, :]).astype("int")
        totAvgRadius = sum(i[2] for i in circles) // len(circles)
        objBallCounter = 0
        for x, y, r in circles:
            if r <= totAvgRadius:
                objBallCounter += 1
                avgObjRadius += r
        avgObjRadius = avgObjRadius // objBallCounter
        for x, y, r in circles:
            if r > 30:
                pockets.append([x, y, r])
                cv2.circle(table_img, (x, y), r, (0, 210, 30), 3)
            else:
                # store pixels within circle below
                ball = isolateBall(x, y, avgObjRadius, table_img)
                ballType = classifyBall(ball)
                if ballType == "stripe":
                    stripes.append((x, y))
                elif ballType == "solid":
                    solids.append((x, y))
                elif ballType == "cue":
                    cueBall = (x, y)
                else:
                    raise Exception("Ball can not be classified. X= " + x + " Y= " + y)
                cv2.circle(table_img, (x, y), avgObjRadius, (150, 100, 255), 4)

    #concatenate before+after images

    img = np.concatenate((table_img_col, cv2.cvtColor(t_c_copy, cv2.COLOR_GRAY2BGR), table_img), axis=0)

    filename = 'img.png'
    cv2.imwrite(filename, img)
    return filename
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def send_hazard_camera(self):
        """ Dont bump into things! """
        self.zarj.pelvis.lean_body_to(0)
        self.zarj.neck.neck_control([0.5, 0.0, 0.0], True)
        rospy.sleep(1.0)
        cloud = self.zarj.eyes.get_stereo_cloud()
        forward, _ = self.zarj.eyes.get_cloud_image_with_details(cloud)
        forward = cv2.cvtColor(forward, cv2.COLOR_BGR2GRAY)
        forward = cv2.copyMakeBorder(forward, 0, 0, 560, 630,
                                     cv2.BORDER_CONSTANT, value=(0, 0, 0))

        self.zarj.neck.neck_control([0.5, 1.0, 0.0], True)
        rospy.sleep(1.0)
        cloud = self.zarj.eyes.get_stereo_cloud()
        right, _ = self.zarj.eyes.get_cloud_image_with_details(cloud)
        right = cv2.copyMakeBorder(right, 0, 0, 1190, 0,
                                   cv2.BORDER_CONSTANT, value=(0, 0, 0))
        right = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)

        self.zarj.neck.neck_control([0.5, -1.0, 0.0], True)
        rospy.sleep(1.0)
        cloud = self.zarj.eyes.get_stereo_cloud()
        left, _ = self.zarj.eyes.get_cloud_image_with_details(cloud)
        left = cv2.copyMakeBorder(left, 0, 0, 0, 1190,
                                  cv2.BORDER_CONSTANT, value=(0, 0, 0))
        left = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)

        self.zarj.neck.neck_control([0.0, 0.0, 0.0], True)

        haz_cam = cv2.bitwise_or(forward, left)
        haz_cam = cv2.bitwise_or(haz_cam, right)

        haz_cam = cv2.cvtColor(haz_cam, cv2.COLOR_GRAY2BGR)

        haz_cam = PERSPECTIVE_HEAD_DOWN.build_rangefinding_image(haz_cam)

        pictsize = np.shape(haz_cam)
        resized = cv2.resize(haz_cam, (pictsize[1]//2, pictsize[0]//2),
                             interpolation=cv2.INTER_AREA)

        (_, png) = cv2.imencode(".png", resized)
        msg = ZarjPicture("hazard", png)
        msg.time = rospy.get_time()
        self.zarj_comm.push_message(msg)
Project: dream2016_dm    Author: lishen    | Project source | File source
def remove_pectoral(self, img, breast_mask, high_int_threshold=.8, 
                        morph_kn_size=3, n_morph_op=7, sm_kn_size=25):
        '''Remove the pectoral muscle region from an input image

        Args:
            img (2D array): input image as a numpy 2D array.
            breast_mask (2D array):
            high_int_threshold ([int or float]): a global threshold for high
                    intensity regions such as the pectoral muscle; values < 1
                    are interpreted as a fraction of the maximum pixel value.
                    Default is .8.
            morph_kn_size ([int]): kernel size for morphological operations 
                    such as erosions and dilations. Default is 3.
            n_morph_op ([int]): number of morphological operations. Default is 7.
            sm_kn_size ([int]): kernel size for final smoothing (i.e. opening). 
                    Default is 25.
        Returns:
            an output image with pectoral muscle region removed as a numpy 
            2D array.
        Notes: this has not been tested on .dcm files yet. It may not work!!!
        '''
        # Enhance contrast and then thresholding.
        img_equ = cv2.equalizeHist(img)
        if high_int_threshold < 1.:
            high_th = int(img.max()*high_int_threshold)
        else:
            high_th = int(high_int_threshold)
        maxval = self.max_pix_val(img.dtype)
        _, img_bin = cv2.threshold(img_equ, high_th, 
                                   maxval=maxval, type=cv2.THRESH_BINARY)
        pect_marker_img = np.zeros(img_bin.shape, dtype=np.int32)
        # Sure foreground (shall be pectoral).
        pect_mask_init = self.select_largest_obj(img_bin, lab_val=maxval, 
                                                 fill_holes=True, 
                                                 smooth_boundary=False)
        kernel_ = np.ones((morph_kn_size, morph_kn_size), dtype=np.uint8)
        pect_mask_eroded = cv2.erode(pect_mask_init, kernel_, 
                                     iterations=n_morph_op)
        pect_marker_img[pect_mask_eroded > 0] = 255
        # Sure background - breast.
        pect_mask_dilated = cv2.dilate(pect_mask_init, kernel_, 
                                       iterations=n_morph_op)
        pect_marker_img[pect_mask_dilated == 0] = 128
        # Sure background - pure background.
        pect_marker_img[breast_mask == 0] = 64
        # Watershed segmentation.
        img_equ_3c = cv2.cvtColor(img_equ, cv2.COLOR_GRAY2BGR)
        cv2.watershed(img_equ_3c, pect_marker_img)
        img_equ_3c[pect_marker_img == -1] = (0, 0, 255)
        # Extract only the breast and smooth.
        breast_only_mask = pect_marker_img.copy()
        breast_only_mask[breast_only_mask == -1] = 0
        breast_only_mask = breast_only_mask.astype(np.uint8)
        breast_only_mask[breast_only_mask != 128] = 0
        breast_only_mask[breast_only_mask == 128] = 255
        kernel_ = np.ones((sm_kn_size, sm_kn_size), dtype=np.uint8)
        breast_only_mask = cv2.morphologyEx(breast_only_mask, cv2.MORPH_OPEN, 
                                            kernel_)
        img_breast_only = cv2.bitwise_and(img_equ, breast_only_mask)

        return (img_breast_only, img_equ_3c)