Python cv2 module: COLOR_GRAY2RGB usage examples

The following code examples, extracted from open-source Python projects, illustrate how to use cv2.COLOR_GRAY2RGB.
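Before the project examples, here is a minimal, self-contained sketch of what the conversion does (the input is a synthetic array, standing in for any grayscale source): COLOR_GRAY2RGB replicates the single gray channel into three identical channels, so the result still looks gray but has the shape a color pipeline expects, and for single-channel input it is interchangeable with COLOR_GRAY2BGR.

import cv2
import numpy as np

# synthetic single-channel uint8 image, a stand-in for any grayscale source
gray = np.random.randint(0, 256, size=(4, 6), dtype=np.uint8)

rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)  # shape (4, 6) -> (4, 6, 3)
assert rgb.shape == gray.shape + (3,)

# every output channel is a copy of the input, so COLOR_GRAY2BGR gives the same result here
assert all(np.array_equal(rgb[:, :, c], gray) for c in range(3))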

Project: esys-pbi    Author: fsxfreak
def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for OpenCV binding inconsistency
        if isinstance(out, tuple):
            out = out[0]

        overlay =  1/(out/falloff+1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe")
Project: reconstruction    Author: microelly2
def execute_Threshold(proxy,obj):

    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__+'/icons/freek.png')

    # img = cv2.imread('dave.jpg',0) ??
    img = cv2.medianBlur(img,5)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
Project: chainer-cyclegan    Author: Aixile
def get_example(self, i):
        id = self.all_keys[i]
        img = None
        val = self.db.get(id.encode())

        img = cv2.imdecode(np.frombuffer(val, dtype=np.uint8), 1)  # np.fromstring is deprecated
        img = self.do_augmentation(img)

        img_color = img
        img_color = self.preprocess_image(img_color)

        img_line = XDoG(img)
        img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
        #if img_line.ndim == 2:
        #    img_line = img_line[:, :, np.newaxis]
        img_line = self.preprocess_image(img_line)

        return img_line, img_color
Project: pynephoscope    Author: neXyon
def selectImage(self, index):
        if index >= len(self.files) or index < 0:
            self.ui.imageView.setText("No images found.")
            return

        self.index = index
        self.image = cv2.imread(self.files[index], 1)

        image = self.modes[self.current_mode].getImage()

        if len(image.shape) < 3 or image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        height, width, channels = image.shape
        bytesPerLine = channels * width

        qimage = QtGui.QImage(image, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)

        self.ui.imageView.setPixmap(QtGui.QPixmap.fromImage(qimage))
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print("show")
            cv2.imshow(obj.Label,opening)
            print("shown")
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Source Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(opening,cmap = 'gray')
            plt.title('Opened Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print("done")
        self.img=opening
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
        edges = cv2.Canny(img,obj.minVal,obj.maxVal)
        color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        edges=color

        if True:
            print("show")
            cv2.imshow(obj.Label,edges)
            print("shown")
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Source Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(edges,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print("done")
        self.img=edges
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = obj.iterations)


        if True:
            print("show")
            cv2.imshow(obj.Label,closing)
            print("shown")
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Source Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(closing,cmap = 'gray')
            plt.title('Closed Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print("done")
        self.img=closing
Project: reconstruction    Author: microelly2
def execute_CannyEdge(proxy,obj):
    ''' create Canny Edge image with two parameters'''

    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__+'/icons/freek.png')

    edges = cv2.Canny(img,obj.minVal,obj.maxVal)
    obj.Proxy.img = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    say(["Canny Edge image updated",obj.minVal,obj.maxVal])
Project: ConditionalGAN    Author: seungjooli
def detect_edges(images):
        def blur(image):
            return cv2.GaussianBlur(image, (5, 5), 0)

        def canny_otsu(image):
            scale_factor = 255
            scaled_image = np.uint8(image * scale_factor)

            otsu_threshold = cv2.threshold(
                cv2.cvtColor(scaled_image, cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
            lower_threshold = max(0, int(otsu_threshold * 0.5))
            upper_threshold = min(255, int(otsu_threshold))
            edges = cv2.Canny(scaled_image, lower_threshold, upper_threshold)
            edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

            return np.float32(edges) * (1.0 / scale_factor)  # float literal avoids integer division under Python 2

        blurred = [blur(image) for image in images]
        canny_applied = [canny_otsu(image) for image in blurred]

        return canny_applied
Project: tensorlight    Author: bsautermeister
def to_rgb(image):
    """Converts a grayscaled image to a colored one.
    Parameters
    ----------
    image: ndarray(uint8)
        A grayscaled image with the shape of [height, width, 1]
        or of shape [height, widht].
    Returns
    ---------
    image: ndarray(uint8)
        Returns a converted image with shape [height, width, 3].
    """
    image_shape = image.shape

    if len(image_shape) > 2:
        img_channels = image_shape[2]
        if img_channels == 1:
            image = np.squeeze(image, axis=2)

        if img_channels != 3:
            image = cast(image)
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    return image
Project: Farmbot_GeneralAP    Author: SpongeYao
def display_panel_mergeframe(self, arg_frame, arg_stepX, arg_stepY): 
        print('***', len(arg_frame.shape))
        if len(arg_frame.shape)==3:
            tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_BGR2RGB)
        else: 
            tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_GRAY2RGB)

        tmp_frame= cv2.resize(tmp_frame,(self.mergeframe_splitX,self.mergeframe_splitY),interpolation=cv2.INTER_LINEAR)
        begX= gui_vars.interval_x+self.mergeframe_splitX*arg_stepX
        begY= self.mergeframe_spaceY+ self.mergeframe_splitY* arg_stepY 
        self.mergeframe[begY:begY+ self.mergeframe_splitY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        #begY= self.mergeframe_height- 50- self.mergeframe_splitY*arg_stepY
        #self.mergeframe[begY-self.mergeframe_splitY:begY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        self.mergeframe_stepX= arg_stepX
        self.mergeframe_stepY= arg_stepY
        print('>> mergeframe_splitY, splitX=', self.mergeframe_splitY, ',', self.mergeframe_splitX)
        print('>> tmp_frame.shape[0,1]=', tmp_frame.shape[0], ',', tmp_frame.shape[1])

        result = Image.fromarray(self.mergeframe)
        result = ImageTk.PhotoImage(result)
        self.panel_mergeframe.configure(image = result)
        self.panel_mergeframe.image = result
Project: Farmbot_GeneralAP    Author: SpongeYao
def get_contour(self, arg_frame, arg_export_index, arg_export_path, arg_export_filename, arg_binaryMethod):
        # Otsu's thresholding after Gaussian filtering
        tmp = cv2.cvtColor(arg_frame, cv2.COLOR_RGB2GRAY)
        blur = cv2.GaussianBlur(tmp,(5,5),0)
        if arg_binaryMethod== 0:
            ret, thresholdedImg= cv2.threshold(blur.copy() , self.threshold_graylevel, 255 , 0)
        elif arg_binaryMethod == 1:
            ret,thresholdedImg = cv2.threshold(blur.copy(),0 ,255 ,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        elif arg_binaryMethod== 2:
            thresholdedImg = cv2.adaptiveThreshold(blur.copy(),255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,5,0)

        result = cv2.cvtColor(thresholdedImg, cv2.COLOR_GRAY2RGB)
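        # note: OpenCV 3.x findContours returns (image, contours, hierarchy);
        # the two-value unpacking below matches OpenCV 2.x and 4.x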
        ctrs, hier = cv2.findContours(thresholdedImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        ctrs = [ctr for ctr in ctrs if cv2.contourArea(ctr) > self.threshold_size]

        rects = [[cv2.boundingRect(ctr) , ctr] for ctr in ctrs]

        for rect , cntr in rects:
            cv2.drawContours(result, [cntr], 0, (0, 128, 255), 3)
        if arg_export_index:
            cv2.imwrite(arg_export_path+ arg_export_filename+'.jpg', result)
        print "Get Contour success"
        return result
Project: Semi-automatic-Annotation    Author: Luoyadan
def disp_segBI_on(self):
        print "displaying segBI ON"
        ## unable edit
        self.parent().mode = "view"
        self.img_arr_tmp = self.img_arr.copy()
        img_arr = self.ori_img.copy()

        ## display binary img
        segBI = np.zeros(img_arr.shape[:2], np.uint8)
        segBI[self.seg_arr == self.current_label] = 255
        segBI = cv2.cvtColor(segBI, cv2.COLOR_GRAY2RGB)


        if self.Zoomed:
            large_segBI = Image.fromarray(segBI).resize((self.w * self.zRate, self.h * self.zRate), Image.NEAREST)
            cropped_segBI = large_segBI.crop(tuple(self.zoom_pos))
            segBI = np.array(cropped_segBI)

        self.img_arr = segBI
        self.update()
Project: PicFilter    Author: dhuadaar
def render(self,frame):
        # number of downscaling steps
        numDownSamples = 2
        # number of bilateral filtering steps
        numBilateralFilters = 7
        img_rgb = frame
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in range(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply a small bilateral filter instead of applying
        # one large filter
        for _ in range(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

        # upsample image to original size
        for _ in range(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        # -- STEP 5 --
        # convert back to color so that it can be bit-ANDed with color image
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        final = cv2.bitwise_and(img_color, img_edge)
        return cv2.medianBlur(final,7)
Project: pybot    Author: spillai
def to_color(im, flip_rb=False): 
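    # for grayscale input, GRAY2RGB and GRAY2BGR replicate the single channel identically,
    # so flip_rb only affects the 3-channel branch below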
    if im.ndim == 2: 
        return cv2.cvtColor(im, cv2.COLOR_GRAY2RGB if flip_rb else cv2.COLOR_GRAY2BGR)
    else: 
        return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) if flip_rb else im.copy()
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            img = obj.imageNode.ViewObject.Proxy.img.copy()

        print (obj.blockSize,obj.ksize,obj.k)
        try:
            gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            gray = np.float32(gray)
            print "normale"
        except:
            im2=cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
            gray = cv2.cvtColor(im2,cv2.COLOR_RGB2GRAY)
            print "except"

        dst = cv2.cornerHarris(gray,obj.blockSize,obj.ksize*2+1,obj.k/10000)
        dst = cv2.dilate(dst,None)

        img[dst>0.01*dst.max()]=[0,0,255]

        dst2=img.copy()
        dst2[dst<0.01*dst.max()]=[255,255,255]
        dst2[dst>0.01*dst.max()]=[0,0,255]

        if not obj.matplotlib:
            cv2.imshow(obj.Label,img)
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst2,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()

        self.img=img
Project: object-detection-python-opencv    Author: hasanaliqureshi
def overlay_mask(mask, image):
    # make the mask RGB
    rgb_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
    # compute the weighted sum of the two image arrays,
    # weighting each by 0.5; the last argument is a scalar added to the sum
    img = cv2.addWeighted(rgb_mask, 0.5, image, 0.5, 0)
    return img
Project: Bag-of-Visual-Words-Python    Author: kushalvyas
def testModel(self):
        """ 
        This method is to test the trained classifier

        read all images from testing path 
        use BOVHelpers.predict() function to obtain classes of each image

        """

        self.testImages, self.testImageCount = self.file_helper.getFiles(self.test_path)

        predictions = []

        for word, imlist in self.testImages.items():
            print("processing", word)
            for im in imlist:
                cl = self.recognize(im)
                predictions.append({
                    'image':im,
                    'class':cl,
                    'object_name':self.name_dict[str(int(cl[0]))]
                    })

        print(predictions)
        for each in predictions:
            # cv2.imshow(each['object_name'], each['image'])
            # cv2.waitKey()
            # cv2.destroyWindow(each['object_name'])
            # 
            plt.imshow(cv2.cvtColor(each['image'], cv2.COLOR_GRAY2RGB))
            plt.title(each['object_name'])
            plt.show()
Project: vehicle_detection    Author: AuzanMuh
def cvtGRAY2RGB(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
    return frame
Project: tbotnav    Author: patilnabhi
def find(self, img):
        self.height, self.width = img.shape[:2]

        armImg = self._extract_arm(img) 
        armImg2 = armImg.copy()

        (contours, defects) = self._find_hull_defects(armImg)

        outImg = cv2.cvtColor(armImg2, cv2.COLOR_GRAY2RGB)

        (outImg, num_fingers) = self._detect_num_fingers(contours, defects, outImg)

        return (outImg, num_fingers)
        # return outImg
Project: GidroGraf-Sirius    Author: alf3r
def find_contours(self):
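        # three-value unpacking matches OpenCV 3.x; OpenCV 2.x and 4.x return only (contours, hierarchy)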
        im2, contours, hierarchy = cv2.findContours(self.data, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        self.data = cv2.cvtColor(self.data, cv2.COLOR_GRAY2RGB)
        cv2.drawContours(self.data, contours, -1, (255, 0, 0), 20)
Project: keras-semantic-segmentation-example    Author: mrgloom
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,1])', np.max(mask[:,:,1]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,5*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = cv2.cvtColor(mask[:,:,1],cv2.COLOR_GRAY2RGB)
    res[:,3*IMAGE_W:4*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,4*IMAGE_W:5*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)
Project: keras-semantic-segmentation-example    Author: mrgloom
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,1])', np.max(mask[:,:,1]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,7*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = cv2.cvtColor(mask[:,:,1],cv2.COLOR_GRAY2RGB)
    res[:,3*IMAGE_W:4*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,4*IMAGE_W:5*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)
    y_pred[:,:,0][y_pred[:,:,0] > 0.5] = 255
    y_pred[:,:,1][y_pred[:,:,1] > 0.5] = 255
    res[:,5*IMAGE_W:6*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,6*IMAGE_W:7*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)
Project: keras-semantic-segmentation-example    Author: mrgloom
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,0])', np.max(mask[:,:,0]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,4*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    y_pred[:,:,0][y_pred[:,:,0] > 0.5] = 255
    res[:,3*IMAGE_W:4*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)
Project: pytorch_crowd_count    Author: BingzheWu
def read_gray_img(img_path):
    bgr = cv2.imread(img_path)
    #bgr = cv2.resize(bgr, (225, 225))
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    gray_3 = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    print(gray.shape)
    plt.imshow(gray_3)
    plt.show()
    return np.expand_dims(gray_3,0).transpose((0,3,1,2))
Project: Semi-automatic-Annotation    Author: Luoyadan
def disp_finalID_on(self):
        print "displaying self.final_ID ON"
        ## unable edit
        self.parent().mode = "view"
        self.img_arr_tmp = self.img_arr.copy()

        # img_arr = int8_to_uint8(self.final_ID, self.int8_to_uint8_OFFSET)
        img_arr = np.zeros(self.final_ID.shape, np.uint8)
        img_arr[self.final_ID == self.cur_line_ID] = 255
        img_arr = cv2.cvtColor(img_arr, cv2.COLOR_GRAY2RGB)

        self.img_arr = img_arr
        self.update()
Project: deepvisualminer    Author: pathbreak
def _execute_pipeline_on_image(self, input_data):

        if input_data['img'].ndim == 3:
            # It *appears* imageio imread returns RGB or RGBA, not BGR...confirmed using a blue
            # filled rectangle that imageio is indeed RGB which is opposite of OpenCV's default BGR.
            # Use RGB consistently everywhere.
            if input_data['img'].shape[-1] == 4:
                input_data['gray'] = cv2.cvtColor(input_data['img'], cv2.COLOR_RGBA2GRAY)
                print("Input image seems to be 4-channel RGBA. Creating 3-channel RGB version")
                input_data['img'] = cv2.cvtColor(input_data['img'], cv2.COLOR_RGBA2RGB)
            else:
                input_data['gray'] = cv2.cvtColor(input_data['img'], cv2.COLOR_RGB2GRAY)

        elif input_data['img'].ndim == 2:
            # If input is a grayscale image, it'll have just 2 dimensions, 
            # but Darkflow code expects 3 dimensions. So always keep 'img' a 3 dimension
            # image no matter what.
            print("Input image is grayscale. Creating RGB version")
            input_data['gray'] = input_data['img'].copy()
            input_data['img'] = cv2.cvtColor(input_data['img'], cv2.COLOR_GRAY2RGB)

        else:
            raise ValueError("Unknown image format: " + str(input_data['img'].shape))

        print("Input image:", input_data['img'].shape)
        print("Grayscale image:", input_data['gray'].shape)

        for comp in self.components:
            print("Executing %s on %s frame %d" % (comp.name, input_data['file'], input_data.get('frame', 0)))
            comp_outputs = comp.execute(input_data, self.input_directory, self.output_directory)

            # At each stage of the pipeline, collect the component's outputs
            # and add them to the input data so that they're available for 
            # downstream components.
            input_data[comp.name] = comp_outputs


        # Release the image arrays.
        input_data['img'] = None
        input_data['gray'] = None
Project: braid    Author: Arya-ai
def create_fixed_image_shape(img, frame_size=(200, 200, 3), random_fill=True,
                             fill_val=0, mode='fit'):
    # if mode == 'fit':
    X1, Y1 = frame_size[1], frame_size[0]
    image_frame = np.ones(frame_size, dtype=np.uint8) * fill_val
    if random_fill:
        image_frame = np.random.randint(
            0, high=255, size=frame_size).astype(np.uint8)

    if ((img.ndim == 2 or img.shape[2] == 1) and
            (len(frame_size) == 3 and frame_size[2] == 3)):
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    X2, Y2 = img.shape[1], img.shape[0]

    if float(X1) / Y1 >= float(X2) / Y2:
        scale = float(Y1) / Y2
    else:
        scale = float(X1) / X2

    img = cv2.resize(img, None, fx=scale, fy=scale)
    sx, sy = img.shape[1], img.shape[0]

    yc = int(round((frame_size[0] - sy) / 2.))
    xc = int(round((frame_size[1] - sx) / 2.))
    image_frame[yc:yc + sy, xc:xc + sx] = img
    assert image_frame.shape == frame_size

    return image_frame
Project: reconstruction    Author: microelly2
def execute_HoughLines(proxy,obj):
    ''' find houghlines '''

    # parameter from obj
    canny1=obj.canny1
    canny2=obj.canny2
    rho=obj.rho
    theta=obj.theta
    threshold=obj.threshold
    minLineLength =obj.minLineLength
    maxLineGap =obj.maxLineGap

    # load the image
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__+'/icons/freek.png')

    # find edges
    # the next two lines can probably be removed. #+#
    edges = cv2.Canny(img,canny1,canny2)
    obj.Proxy.img = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray,canny1,canny2)
    xsize=img.shape[1]
    ysize=img.shape[0]

    # find lines
    lines = cv2.HoughLinesP(edges,1,np.pi/180*theta,threshold, minLineLength = minLineLength, maxLineGap = maxLineGap)

    k=0
    fclines=[]
    img = 0 *img

    for l in lines:
        k += 1
        [[x1,y1,x2,y2]] = l
        fl=tools.fcline(x1,-y1,x2,-y2)
        fclines.append(fl)       
        print (x1,y1,x2,y2)
        a=cv2.line(img,(x1,y1),(x2,y2),(0,255,0),1)

    # data for following nodes
    obj.Proxy.img=img
    obj.Proxy.fclines=fclines
    obj.Proxy.lines=lines

    # method for extra calculations
    obj.Proxy.__class__.linelengths=property(lambda self: linelengths(self))
    obj.Proxy.__class__.directions=property(lambda self: directions(self))
Project: Stereo-Pose-Machines    Author: ppwwyyxx
def match(self, im0, im1, hm0, hm1):
        viz = False
        mask0 = self.BG0.segment(im0)
        mask1 = self.BG1.segment(im1)

        im0 = im0 * (mask0>1e-10).astype('uint8')[:,:,np.newaxis]
        im1 = im1 * (mask1>1e-10).astype('uint8')[:,:,np.newaxis]

        if viz:
            viz0 = np.copy(im0)
            viz1 = np.copy(im1)
        pts14 = []
        for chan in range(14):
            h0 = cv2.resize(hm0[:,:,chan], (ORIG_SIZE, ORIG_SIZE))
            h1 = cv2.resize(hm1[:,:,chan], (ORIG_SIZE, ORIG_SIZE))
            y0, x0 = argmax_2d(h0)
            y1, x1 = argmax_2d(h1)

            target = take_patch(im0, y0, x0, PATCH_SIZE)
            region = take_patch(im1, y1, x1, REGION_SIZE)

            res = cv2.matchTemplate(region, target, cv2.TM_CCOEFF_NORMED)
            _, _, _, top_left = cv2.minMaxLoc(res)
            top_left = top_left[::-1]
            center_in_region = (top_left[0] + PATCH_SIZE, top_left[1] + PATCH_SIZE)
            center_in_im1 = (center_in_region[0] + y1-REGION_SIZE,
                    center_in_region[1] + x1-REGION_SIZE)

            if viz:
                cv2.circle(viz0, (x0,y0), 3, (0,0,255), -1)
                cv2.circle(viz1, tuple(center_in_im1[::-1]), 3, (0,0,255), -1)
            pts14.append([x0, y0, center_in_im1[1],center_in_im1[0]])
        if viz:
            mask0 = cv2.cvtColor(mask0, cv2.COLOR_GRAY2RGB).astype('uint8')
            mask1 = cv2.cvtColor(mask1, cv2.COLOR_GRAY2RGB).astype('uint8')
            viz = np.concatenate((mask0, viz0,viz1, mask1),axis=1)
            cv2.imshow("v", viz)
            cv2.waitKey(1)
        return np.array(pts14)
        # return viz, np.array(pts14)  # unreachable alternative return, kept for reference

        #rv = np.copy(region)
        #rv[center_in_region[0],center_in_region[1]] = (0,0,255)
        #tv = cv2.resize(target, tuple(region.shape[:2][::-1]))

        #hv = np.zeros((region.shape), dtype='float32')
        #res = res - res.min()
        #res = res / res.max() * 255
        #res = cv2.cvtColor(res, cv2.COLOR_GRAY2RGB)
        #hv[PATCH_SIZE:PATCH_SIZE+res.shape[0],PATCH_SIZE:PATCH_SIZE+res.shape[1],:] = res
        #region = np.concatenate((region, rv, tv, hv), axis=1)
        #cv2.imwrite("patchmatch/region{}.png".format(chan), region)
Project: TensorBox    Author: Russell91
def load_idl_tf(idlfile, H, jitter):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    annolist = al.parse(idlfile)
    annos = []
    for anno in annolist:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
        annos.append(anno)
    random.seed(0)
    if H['data']['truncate_data']:
        annos = annos[:10]
    for epoch in itertools.count():
        random.shuffle(annos)
        for anno in annos:
            try:
                if 'grayscale' in H and 'grayscale_prob' in H:
                    I = imread(anno.imageName, mode = 'RGB' if random.random() < H['grayscale_prob'] else 'L')
                    if len(I.shape) < 3:
                        I = cv2.cvtColor(I, cv2.COLOR_GRAY2RGB)
                else:
                    I = imread(anno.imageName, mode = 'RGB')
                    if len(I.shape) < 3:
                        continue
                if I.shape[0] != H["image_height"] or I.shape[1] != H["image_width"]:
                    if epoch == 0:
                        anno = rescale_boxes(I.shape, anno, H["image_height"], H["image_width"])
                    I = imresize(I, (H["image_height"], H["image_width"]), interp='cubic')
                if jitter:
                    jitter_scale_min=0.9
                    jitter_scale_max=1.1
                    jitter_offset=16
                    I, anno = annotation_jitter(I,
                                                anno, target_width=H["image_width"],
                                                target_height=H["image_height"],
                                                jitter_scale_min=jitter_scale_min,
                                                jitter_scale_max=jitter_scale_max,
                                                jitter_offset=jitter_offset)

                boxes, flags = annotation_to_h5(H,
                                                anno,
                                                H["grid_width"],
                                                H["grid_height"],
                                                H["rnn_len"])

                yield {"image": I, "boxes": boxes, "flags": flags}
            except Exception as exc:
                print(exc)
Project: TensorBox    Author: Russell91
def get_results(args, H, data_dir):
    tf.reset_default_graph()
    H["grid_width"] = H["image_width"] / H["region_size"]
    H["grid_height"] = H["image_height"] / H["region_size"]
    if args.frozen_graph:
        graph = load_frozen_graph(args.graphfile)
    else:
        new_saver = tf.train.import_meta_graph(args.graphfile)
    NUM_THREADS = 8
    with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS),
            graph=graph if args.frozen_graph else None) as sess:
        sess.run(tf.global_variables_initializer())
        if args.frozen_graph:
            x_in = graph.get_tensor_by_name('x_in:0')
            pred_boxes = graph.get_tensor_by_name('add:0')
            pred_confidences = graph.get_tensor_by_name('Reshape_2:0')
        else:
            new_saver.restore(sess, args.weights)
            x_in = tf.get_collection('placeholders')[0]
            pred_boxes, pred_confidences = tf.get_collection('vars')
            #freeze_graph.freeze_graph("overfeat.pb", "", False, args.weights, "add,Reshape_2", "save/restore_all",
             #"save/Const:0", "overfeat_frozen.pb", False, '') 

        pred_annolist = al.AnnoList()

        included_extenstions = ['jpg', 'bmp', 'png', 'gif']
        image_names = [fn for fn in os.listdir(args.datadir) if any(fn.lower().endswith(ext) for ext in included_extenstions)]
        image_dir = get_image_dir(args)
        subprocess.call('mkdir -p %s' % image_dir, shell=True)
        for i in range(len(image_names)):
            image_name = image_names[i]
            if H['grayscale']:
                orig_img = imread('%s/%s' % (data_dir, image_name), mode = 'RGB' if random.random() < H['grayscale_prob'] else 'L')
                if len(orig_img.shape) < 3:
                    orig_img = cv2.cvtColor(orig_img, cv2.COLOR_GRAY2RGB)
            else:
                orig_img = imread('%s/%s' % (data_dir, image_name), mode = 'RGB')
            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
            feed = {x_in: img}
            start_time = time()
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
            time_2 = time()
            pred_anno = al.Annotation()
            pred_anno.imageName = image_name
            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=args.min_conf, tau=args.tau, show_suppressed=args.show_suppressed)
            print(time() - start_time)
            pred_anno.rects = rects
            pred_anno.imagePath = os.path.abspath(data_dir)
            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1], test=True)
            pred_annolist.append(pred_anno)

            imname = '%s/%s' % (image_dir, os.path.basename(image_name))
            misc.imsave(imname, new_img)
            if i % 25 == 0:
                print(i)
    return pred_annolist
Project: ObjectDetection    Author: PhilippParis
def main(_):
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"

    # --------- load classifier ------- #
    cascade = cv2.CascadeClassifier(FLAGS.cascade_xml)
    model, x, keep_prob = get_nn_classifier()

    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')

    img = utils.getImage(image_path)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

    delta = [-2, -1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 0.99, 0.995, 0.999, 0.9995, 0.9999]

    start = time.time()
    candidates = cascade.detectMultiScale(img, scaleFactor=FLAGS.scaleFactor, minNeighbors=FLAGS.minNeighbors, maxSize=(FLAGS.max_window_size,FLAGS.max_window_size))
    detected = nn_classification(candidates, img, model, x, keep_prob, delta)
    elapsed = (time.time() - start)  

    print('detection time: %d' % elapsed)

    # ------------- evaluation --------------#

    ground_truth_data = utils.get_ground_truth_data(csv_path)

    for j in range(len(delta)):
        detected[j] = [Rect(x, y, w, h) for (x,y,w,h) in detected[j]]
        tp, fn, fp = utils.evaluate(ground_truth_data, detected[j])

        # ----------------output ----------------#
        # image output
        """
        img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        for (x,y,w,h) in detected[j]:
            cv2.rectangle(img_out, (x-w/2,y-h/2),(x+w/2,y+h/2), [0,255,0], 3)

        for c in ground_truth_data:
            cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)

        output_file = "out" + '_' + str(datetime.datetime.now())
        cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
        """
        # csv output
        with open(FLAGS.output_dir + FLAGS.out + '.csv', 'a') as file:  # text mode ('ab' was Python 2)
            writer = csv.writer(file, delimiter=',')
            writer.writerow([FLAGS.test, str(elapsed), str(len(ground_truth_data)), delta[j], FLAGS.minNeighbors, FLAGS.scaleFactor, 
                            str(len(detected[j])), str(tp), str(fp), str(fn)])
Project: ObjectDetection    Author: PhilippParis
def main():
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"

    # ------------ load classifier ---------- #
    cascade = cv2.CascadeClassifier(FLAGS.cascade_xml)

    # -------------- open image --------------#
    img = utils.getImage(image_path)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')

    start = time.time()
    detected = cascade.detectMultiScale(img, scaleFactor=FLAGS.scaleFactor, minNeighbors=FLAGS.minNeighbors, maxSize=(FLAGS.max_window_size, FLAGS.max_window_size))
    elapsed = (time.time() - start)
    print('detection time: %d' % elapsed)

    # ------------- evaluation --------------#
    detected = [Rect(x, y, w, h) for (x,y,w,h) in detected]
    ground_truth_data = utils.get_ground_truth_data(csv_path)

    tp, fn, fp = utils.evaluate(ground_truth_data, detected)

    # ----------------output ----------------#
    # image output
    """
    img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    for c in ground_truth_data:
        cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)

    for r in detected:
        cv2.rectangle(img_out, (r.x, r.y), (r.x2(), r.y2()), [0,255,0], 2)

    output_file = "out" + '_' + str(datetime.datetime.now())
    cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
    """
    # csv output
    with open(FLAGS.output_dir + 'results.csv', 'a') as file:  # text mode ('ab' was Python 2)
        writer = csv.writer(file, delimiter=',')
        writer.writerow([FLAGS.test, str(elapsed),str(len(ground_truth_data)), str(FLAGS.scaleFactor), 
                         str(FLAGS.minNeighbors), str(len(detected)), str(tp), str(fp), str(fn)])