Python cv2 module: COLOR_BGR2RGB example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.COLOR_BGR2RGB.
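
For reference, a minimal usage sketch (the file name 'example.jpg' is a placeholder): cv2.imread loads pixels in BGR channel order, while libraries such as matplotlib expect RGB, so the conversion usually happens right before display or before feeding a model trained on RGB inputs.

import cv2
import matplotlib.pyplot as plt

bgr = cv2.imread('example.jpg')             # OpenCV loads images as BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # swap channels for RGB consumers
plt.imshow(rgb)                             # matplotlib interprets arrays as RGB
plt.show()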

Project: Deep360Pilot-optical-flow    Author: yenchenlin    | Project source | File source
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison if we have the Matlab version of the image
Project: flask-app-for-mxnet-img-classifier    Author: XD-DENG    | Project source | File source
def get_image(file_location, local=False):
    # users can either 
    # [1] upload a picture (local = True)
    # or
    # [2] provide the image URL (local = False)
    if local:
        fname = file_location
    else:
        fname = mx.test_utils.download(file_location, dirname="static/img_pool")

    # read first and check for failure before converting; cv2.imread returns None on error
    img = cv2.imread(fname)
    if img is None:
        return None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # convert into format (batch, RGB, width, height)
    img = cv2.resize(img, (224, 224))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]

    return img
Project: pynephoscope    Author: neXyon    | Project source | File source
def selectImage(self, index):
        if index >= len(self.files) or index < 0:
            self.ui.imageView.setText("No images found.")
            return

        self.index = index
        self.image = cv2.imread(self.files[index], 1)

        image = self.modes[self.current_mode].getImage()

        if len(image.shape) < 3 or image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        height, width, byteValue = self.image.shape
        byteValue = byteValue * width

        qimage = QtGui.QImage(image, width, height, byteValue, QtGui.QImage.Format_RGB888)

        self.ui.imageView.setPixmap(QtGui.QPixmap.fromImage(qimage))
Project: yolo_tensorflow    Author: hizhangp    | Project source | File source
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
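        # scale pixel values from [0, 255] to [-1, 1]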
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

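        # rescale box coordinates from the network input size back to the original image size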
        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result
Project: guided-filter    Author: lisabug    | Project source | File source
def test_color():
    image = cv2.imread('data/Lenna.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    noise = (np.random.rand(image.shape[0], image.shape[1], 3) - 0.5) * 50
    image_noise = image + noise

    radius = [1, 2, 4]
    eps = [0.005]

    combs = list(itertools.product(radius, eps))

    vis.plot_single(to_32F(image), title='origin')
    vis.plot_single(to_32F(image_noise), title='noise')

    for r, e in combs:
        GF = GuidedFilter(image, radius=r, eps=e)
        vis.plot_single(to_32F(GF.filter(image_noise)), title='r=%d, eps=%.3f' % (r, e))
Project: STS-PiLot    Author: mark-orion    | Project source | File source
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while not camtest:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB = cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if no clients have asked for frames in the
            # last 10 seconds, stop the thread
            if time.time() - cls.last_access > 10:
                break
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def plot_over_img(self, img, x, y, x_pr, y_pr, bb_gt):
        """Plot the landmarks over the image with the bbox."""
        plt.close("all")
        fig = plt.figure(frameon=False)  # , figsize=(15, 10.8), dpi=200
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), aspect="auto")
        ax.scatter(x, y, s=10, color='r')
        ax.scatter(x_pr, y_pr, s=10, color='g')
        rect = patches.Rectangle(
            (bb_gt[0], bb_gt[1]), bb_gt[2]-bb_gt[0], bb_gt[3]-bb_gt[1],
            linewidth=1, edgecolor='b', facecolor='none')
        ax.add_patch(rect)
        fig.add_axes(ax)

        return fig
Project: traffic_detection_yolo2    Author: wAuner    | Project source | File source
def create_heatmaps(img, pred):
    """
    Uses objectness probability to draw a heatmap on the image and returns it
    """
    # find anchors with highest prediction
    best_pred = np.max(pred[..., 0], axis=-1)
    # convert probabilities to colormap scale
    best_pred = np.uint8(best_pred * 255)
    # apply color map
    # cv2 colormaps create BGR, not RGB
    cmap = cv2.cvtColor(cv2.applyColorMap(best_pred, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)
    # resize the color map to fit image
    cmap = cv2.resize(cmap, img.shape[1::-1], interpolation=cv2.INTER_NEAREST)

    # overlay cmap with image
    return cv2.addWeighted(cmap, 1, img, 0.5, 0)
Project: YOLO-Object-Detection-Tensorflow    Author: huseinzol05    | Project source | File source
def detect(img):
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (settings.image_size, settings.image_size))
    inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) * 2.0 - 1.0
    inputs = np.reshape(inputs, (1, settings.image_size, settings.image_size, 3))
    result = detect_from_cvmat(inputs)[0]
    print(result)

    for i in range(len(result)):
        result[i][1] *= (1.0 * img_w / settings.image_size)
        result[i][2] *= (1.0 * img_h / settings.image_size)
        result[i][3] *= (1.0 * img_w / settings.image_size)
        result[i][4] *= (1.0 * img_h / settings.image_size)

    return result
Project: PiWifiCam    Author: mark-orion    | Project source | File source
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while not camtest:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB = cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if no clients have asked for frames in the
            # last 10 seconds, stop the thread
            if time.time() - cls.last_access > 10:
                break
Project: image_recognition    Author: tue-robotics    | Project source | File source
def _get_representation(self, bgr_image):
        """
        Gets the vector of a face in the image
        :param bgr_image: The input image
        :return: The vector representation
        """
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        bb = self._align.getLargestFaceBoundingBox(rgb_image)
        if bb is None:
            raise Exception("Unable to find a face in image")

        aligned_face = self._align.align(96, rgb_image, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        if aligned_face is None:
            raise Exception("Unable to align face bb image")

        return self._net.forward(aligned_face)
Project: bot2017Fin    Author: AllanYiin    | Project source | File source
def equal_color(img: Image, color):
    arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    arr_img = cv2.resize(arr_img, (img.size[0] * 10, img.size[1] * 10))
    boundaries = []
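    # the PIL input is RGB but arr_img is BGR, so the ±15 bounds are built in B, G, R order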
    boundaries.append(([max(color[2] - 15, 0), max(color[1] - 15, 0), max(color[0] - 15, 0)],
                       [min(color[2] + 15, 255), min(color[1] + 15, 255), min(color[0] + 15, 255)]))
    for (lower, upper) in boundaries:
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(arr_img, lower, upper)
        res = cv2.bitwise_and(arr_img, arr_img, mask=mask)
        res = cv2.resize(res, (img.size[0], img.size[1]))
        cv2_im = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        output_img = Image.fromarray(cv2_im)

        return output_img
Project: rekognition-video-utils    Author: awslabs    | Project source | File source
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
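    # cv2.cv.CV_CAP_PROP_FRAME_COUNT is the OpenCV 2.x name; OpenCV 3+ renamed it cv2.CAP_PROP_FRAME_COUNT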
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        if count % inc == 0:
            success, image = vidcap.read()
            if success:
                cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                if fmt == 'PIL':
                    im = Image.fromarray(cv2_im)
                #elif fmt == 'DISK':
                    #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
                else:
                    im = cv2_im
                yield count, im 
            else:
                break
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2]
Project: ecs-mxnet-example    Author: awslabs    | Project source | File source
def predict(url, mod, synsets):
     req = urllib2.urlopen(url)
     arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
     cv2_img = cv2.imdecode(arr, -1)
     # cv2.imdecode returns None on failure; check before converting
     if cv2_img is None:
         return None
     img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
     img = cv2.resize(img, (224, 224))
     img = np.swapaxes(img, 0, 2)
     img = np.swapaxes(img, 1, 2)
     img = img[np.newaxis, :]

     mod.forward(Batch([mx.nd.array(img)]))
     prob = mod.get_outputs()[0].asnumpy()
     prob = np.squeeze(prob)

     a = np.argsort(prob)[::-1]
     out = ''
     for i in a[0:5]:
         out += 'probability=%f, class=%s' %(prob[i], synsets[i])
     out += "\n"
     return out
Project: hourglasstensorlfow    Author: wbenbihi    | Project source | File source
def open_img(self, name, color = 'RGB'):
        """ Open an image 
        Args:
            name    : Name of the sample
            color   : Color Mode (RGB/BGR/GRAY)
        """
        if name[-1] in self.letter:
            name = name[:-1]
        img = cv2.imread(os.path.join(self.img_dir, name))
        if color == 'RGB':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return img
        elif color == 'BGR':
            return img
        elif color == 'GRAY':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            return img
        else:
            print('Color mode supported: RGB/BGR/GRAY. If you need another mode, do it yourself :p')
Project: hourglasstensorlfow    Author: wbenbihi    | Project source | File source
def detect(self, img):
        """ Method for Object Detection
        Args:
            img         : Input Image (BGR Image)
        Returns:
            result      : List of Bounding Boxes
        """
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))
        result = self.detect_from_cvmat(inputs)[0]
        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)
        return result
Project: fully-convolutional-network-semantic-segmentation    Author: alecng94    | Project source | File source
def preprocessImg(imgPath, clipSize):
    if clipSize != 0:
        im = enhance(imgPath, clipSize)
    else:
        im = cv2.imread(imgPath)
        im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))

    # switch to BGR, subtract mean
    in_ = np.array(im, dtype = np.float32)
    in_ = in_[:,:,::-1]
    in_ -= np.array((104.00698793,116.66876762,122.67891434))

    # make dims C x H x W for Caffe
    in_ = in_.transpose((2,0,1))

    return in_
Project: age-gender-classification    Author: yunsangq    | Project source | File source
def run(self, filename):
        img = cv2.imread(filename)
        self.h_img, self.w_img, _ = img.shape
        img_resized = cv2.resize(img, (448, 448))
        img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
        img_resized_np = np.asarray(img_RGB)
        inputs = np.zeros((1, 448, 448, 3), dtype='float32')
        inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_19, feed_dict=in_dict)
        faces = self.interpret_output(net_output[0])
        images = []
        for i, (x, y, w, h, p) in enumerate(faces):
            images.append(self.sub_image('%s/%s-%d.jpg' % (self.tgtdir, self.basename, i + 1), img, x, y, w, h))

        print('%d faces detected' % len(images))

        for (x, y, w, h, p) in faces:
            print('Face found [%d, %d, %d, %d] (%.2f)' % (x, y, w, h, p))
            self.draw_rect(img, x, y, w, h)
            # Fix in case nothing found in the image
        outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
        cv2.imwrite(outfile, img)
        return images, outfile
Project: single_shot_multibox_detector    Author: oarriaga    | Project source | File source
def start_video(self, model):
        camera = cv2.VideoCapture(0)
        while True:
            frame = camera.read()[1]
            if frame is None:
                continue
            image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_array = cv2.resize(image_array, (300, 300))
            image_array = substract_mean(image_array)
            image_array = np.expand_dims(image_array, 0)
            predictions = model.predict(image_array)
            detections = detect(predictions, self.prior_boxes)
            plot_detections(detections, frame, 0.6,
                            self.arg_to_class, self.colors)
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows()
Project: single_shot_multibox_detector    Author: oarriaga    | Project source | File source
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # img = imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4],
                                                target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        # Uncomment for PyTorch evaluation:
        # return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        return img, target, height, width
Project: single_shot_multibox_detector    Author: oarriaga    | Project source | File source
def pull_image(self, index):
        '''Returns the original image at index as an RGB numpy array.

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            numpy.ndarray: img in RGB channel order
        '''
        img_id = self.ids[index]
        img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
        # return imread(self._imgpath % img_id)
Project: dataArtist    Author: radjkarl    | Project source | File source
def _grabImage(self):
        w = self.display.widget
        rval, img = self.vc.read()
        if rval:
            # COLOR
            if self.pGrayscale.value():
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            else:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            #img = cv2.transpose(img)
            if self.pFloat.value():
                img = toFloatArray(img)
            i = w.image
            b = self.pBuffer.value()
            if b:
                # BUFFER LAST N IMAGES
                if i is None or len(i) < b:
                    self.display.addLayer(data=img)
                else:
                    # TODO: implement as ring buffer using np.roll()
                    img = np.insert(i, 0, img, axis=0)
                    img = img[:self.pBuffer.value()]
                    w.setImage(img, autoRange=False, autoLevels=False)
            else:
                w.setImage(img, autoRange=False, autoLevels=False)
Project: visual_mpc    Author: febert    | Project source | File source
def init_traj_visualmpc_handler(self, req):
        self.igrp = req.igrp
        self.i_traj = req.itr

        self.t = 0
        if 'use_goalimage' in self.policyparams:
            goal_main = self.bridge.imgmsg_to_cv2(req.goalmain)
            goal_main = cv2.cvtColor(goal_main, cv2.COLOR_BGR2RGB)
            # goal_aux1 = self.bridge.imgmsg_to_cv2(req.goalaux1)
            # goal_aux1 = cv2.cvtColor(goal_aux1, cv2.COLOR_BGR2RGB)
            Image.fromarray(goal_main).show()
            goal_main = goal_main.astype(np.float32) / 255.
            self.cem_controller.goal_image = goal_main

        print('init traj{} group{}'.format(self.i_traj, self.igrp))

        if 'ndesig' in self.policyparams:
            self.initial_pix_distrib = []
        else:
            self.initial_pix_distrib1 = []
            self.initial_pix_distrib2 = []

        self.cem_controller = CEM_controller(self.agentparams, self.policyparams, self.predictor, save_subdir=req.save_subdir)
        self.save_subdir = req.save_subdir
        return init_traj_visualmpcResponse()
Project: visual_mpc    Author: febert    | Project source | File source
def save(self, i_save, action, endeffector_pose):
        self.t_savereq = rospy.get_time()
        assert self.instance_type == 'main'

        if self.use_aux:
            # request save at auxiliary recorders
            try:
                rospy.wait_for_service('get_kinectdata', 0.1)
                resp1 = self.save_kinectdata_func(i_save)
            except (rospy.ServiceException, rospy.ROSException) as e:
                rospy.logerr("Service call failed: %s" % (e,))
                raise ValueError('get_kinectdata service failed')

        if self.save_images:
            self._save_img_local(i_save)

        if self.save_actions:
            self._save_state_actions(i_save, action, endeffector_pose)

        if self.save_gif:
            highres = cv2.cvtColor(self.ltob.img_cv2, cv2.COLOR_BGR2RGB)
            print('highres dim', highres.shape)
            self.highres_imglist.append(highres)
Project: projectoxford    Author: zooba    | Project source | File source
def _renderResultOnImage(self, result, arr):
        """
            Draws boxes and text representing each face's emotion.
        """

        import operator, cv2

        img = cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)

        for currFace in result:
            faceRectangle = currFace['faceRectangle']
            cv2.rectangle(img,(faceRectangle['left'],faceRectangle['top']),
                               (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                               color = (255,0,0), thickness = 5)

        for currFace in result:
            faceRectangle = currFace['faceRectangle']
            currEmotion = max(iter(currFace['scores'].items()), key=operator.itemgetter(1))[0]

            textToWrite = '{0}'.format(currEmotion)
            cv2.putText(img, textToWrite, (faceRectangle['left'],faceRectangle['top']-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1)

        return img
Project: vbcg    Author: nspi    | Project source | File source
def get_frame(self):
        """This function delivers frames from the camera or the hard disk for the GUI

            Returns:
            status -- False if the user has not pressed the ''start'' button. If pressed, returns True
            frame -- A black frame if the user has not pressed the ''start'' button. Otherwise a frame from camera or disk
        """

        # Waiting for the user to press the ''start'' button
        if self.eventVideoReady.is_set():

                # Read current frame from thread
                frame = self.currentFrame

                # Convert color to RGB
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                # Return status and frame
                return True, frame

        else:
            # Return false as status and black frame
            return False, np.zeros((480, 640, 3), np.uint8)
Project: behavioral-cloning    Author: BillZito    | Project source | File source
def change_brightness(img_arr):
  # print('change brightness called')
  adjusted_imgs = np.array([img_arr[0]])
  for img_num in range(0, len(img_arr)):
    img = img_arr[img_num]
    # print('array access')
    # show_image(img)
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) 
    # print('rgb2hsv')
    # show_image(hsv)
    rando = np.random.uniform()
    # print('rando is', rando)
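    # scale the V (brightness) channel by a random factor in [0.25, 1.25)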
    hsv[:,:, 2] = hsv[:,:, 2] * (.25 + rando)

    new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    # print('hsv2rgb')
    # show_image(new_img)
    # new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
    # show_images(img.reshape((1,) + img.shape), new_img.reshape((1,) + new_img.shape))
    adjusted_imgs = np.append(adjusted_imgs, new_img.reshape((1,) + new_img.shape), axis=0)

  adjusted_imgs = np.delete(adjusted_imgs, 0, 0)
  return adjusted_imgs
Project: MyoSEMG    Author: LuffyDai    | Project source | File source
def run(self):

        while True:
            if self.flag:
                ret, image = self.camera.read()
                if image is None:
                    break
                color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                height, width, _ = color_swapped_image.shape

                qt_image = QImage(color_swapped_image.data,
                                  width,
                                  height,
                                  color_swapped_image.strides[0],
                                  QImage.Format_RGB888)
                pixmap = QPixmap(qt_image)
                pixmap = pixmap.scaled(self.videoLabel.geometry().width(), self.videoLabel.geometry().height())
                if self.start_flag and self.support_flag:
                    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
                    self.path = "appdata/" + self.cap.guide.dataset_type + "/data/" + self.cap.date_str + "-" + str(
                        self.cap.guide.gesture_type) + ".avi"
                    self.out = cv2.VideoWriter(self.path, fourcc, 20.0, (640, 480))
                    self.support_flag = False
                if self.name == "Camera" and self.out is not None:
                    self.image_siganl.emit(image)
                self.videoLabel.setPixmap(pixmap)
                if self.name == "Video":
                    time.sleep(1/self.fps)
            else:
                pass
Project: histonets-cv    Author: sul-cidr    | Project source | File source
def test_get_palette_min_values(self):
        image = utils.Image.get_images([self.image_clean])[0].image
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        options = namedtuple(
            'options',
            ['quiet', 'sample_fraction', 'value_threshold', 'sat_threshold']
        )(
            quiet=True,
            sample_fraction=.01,
            value_threshold=.01,
            sat_threshold=.01,
        )
        samples = noteshrink.sample_pixels(rgb_image, options)
        palette = utils.get_palette(samples, 2, background_value=1,
                                    background_saturation=1)
        test_palette = np.array([[255, 123, 92], [193, 86, 64]])
        assert palette.shape <= test_palette.shape
        # background colors must coincide
        assert np.array_equal(palette[0], test_palette[0])
Project: histonets-cv    Author: sul-cidr    | Project source | File source
def test_get_palette_max_values(self):
        image = utils.Image.get_images([self.image_clean])[0].image
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        options = namedtuple(
            'options',
            ['quiet', 'sample_fraction', 'value_threshold', 'sat_threshold']
        )(
            quiet=True,
            sample_fraction=1,
            value_threshold=1,
            sat_threshold=1,
        )
        samples = noteshrink.sample_pixels(rgb_image, options)
        palette = utils.get_palette(samples, 128, background_value=100,
                                    background_saturation=100)
        background_color = np.array([255, 123, 92])
        assert palette.shape <= (128, 3)
        # background colors must coincide
        assert np.array_equal(palette[0], background_color)
Project: image-segmentation-fcn    Author: ljanyst    | Project source | File source
def draw_labels(img, labels, label_colors, convert=True):
    """
    Draw the labels on top of the input image
    :param img:          the image being classified
    :param labels:       the output of the neural network
    :param label_colors: the label color map defined in the source
    :param convert:      should the output be converted to RGB
    """
    labels_colored = np.zeros_like(img)
    for label in label_colors:
        label_mask = labels == label
        labels_colored[label_mask] = label_colors[label]
    img = cv2.addWeighted(img, 1, labels_colored, 0.8, 0)
    if not convert:
        return img
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

#-------------------------------------------------------------------------------
Project: kaggle_amazon    Author: asanakoy    | Project source | File source
def predict(model_name, model, images_dir, image_ids, batch_size=64, tile_size=224):
    x_test = np.zeros((len(image_ids), tile_size, tile_size, 3), dtype=np.float32)

    for idx, image_name in tqdm(enumerate(image_ids), total=len(image_ids)):
        # img = imread(join(images_dir, '{}.jpg'.format(image_name)))
        image_path = join(images_dir, '{}.jpg'.format(image_name))
        try:
            img = cv2.imread(image_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = np.asarray(cv2.resize(img, (tile_size, tile_size)), dtype=np.float32)
            x_test[idx, ...] = img
        except Exception as e:
            print(e)
            print('image:', image_path)
    x_test = get_preprocess_input_fn(model_name)(x_test)
    print(x_test.shape)
    predictions = model.predict(x_test, batch_size=batch_size, verbose=1)
    return predictions
Project: RacingRobot    Author: sergionr2    | Project source | File source
def extractInfo(self):
        try:
            while not self.exit:
                try:
                    frame = self.frame_queue.get(block=True, timeout=1)
                except queue.Empty:
                    print("Queue empty")
                    continue
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if self.debug:
                    self.out_queue.put(item=frame, block=False)
                else:
                    if self.frame_num % SAVE_EVERY == 0:
                        cv2.imwrite("debug/{}_{}.jpg".format(experiment_time, self.frame_num), frame)
                    try:
                        turn_percent, centroids = processImage(frame)
                        self.out_queue.put(item=(turn_percent, centroids), block=False)
                    except Exception as e:
                        print("Exception in RBGAnalyser processing image: {}".format(e))
                self.frame_num += 1
        except Exception as e:
            print("Exception in RBGAnalyser after loop: {}".format(e))
Project: ecs-deep-learning-workshop    Author: awslabs    | Project source | File source
def predict(filename, mod, synsets):
  # read first and check for failure before converting; cv2.imread returns None on error
  img = cv2.imread(filename)
  if img is None:
    return None
  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
  img = cv2.resize(img, (224, 224))
  img = np.swapaxes(img, 0, 2)
  img = np.swapaxes(img, 1, 2) 
  img = img[np.newaxis, :] 

  mod.forward(Batch([mx.nd.array(img)]))
  prob = mod.get_outputs()[0].asnumpy()
  prob = np.squeeze(prob)

  a = np.argsort(prob)[::-1]    
  for i in a[0:5]:
    print('probability=%f, class=%s' %(prob[i], synsets[i]))
Project: ssd_tensorflow    Author: seann999    | Project source | File source
def start_stream(self):
        print("starting stream...")
        stream = urllib2.urlopen(self.address)  # e.g. 'http://192.168.100.102:8080/video'
        bytes = b''

        while True:
            bytes += stream.read(1024)
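            # b'\xff\xd8' and b'\xff\xd9' are the JPEG start-of-image / end-of-image markers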
            a = bytes.find(b'\xff\xd8')
            b = bytes.find(b'\xff\xd9')
            if a != -1 and b != -1:
                jpg = bytes[a:b+2]
                bytes = bytes[b+2:]
                self.image = cv2.cvtColor(cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
                #cv2.imshow('i', self.image)
                #cv2.waitKey(1)
Project: flight-stone    Author: asmateus    | Project source | File source
def hsvModer(self, index, hsv_valueT, hsv_value_B):
        img_BGR = self.img[index]
        img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
        img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)

        lower_red = np.array(hsv_value_B)
        upper_red = np.array(hsv_valueT)

        mask = cv2.inRange(img_HSV, lower_red, upper_red)
        res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
        if self.erosion:
            kernel = np.ones((5, 5), np.uint8)
            res = cv2.erode(res, kernel, iterations=1)
        if self.dilate:
            kernel = np.ones((9, 9), np.uint8)
            res = cv2.dilate(res, kernel, iterations=1)

        return res
Project: flight-stone    Author: asmateus    | Project source | File source
def pullData(self):
        try:
            if self.pth:
                capture = cv2.VideoCapture(1)
                capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.device['baudrate'][1])
                capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.device['baudrate'][0])

                while True:
                    if self.endtr:
                        capture.release()
                        return

                    _, frame = capture.read()
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                    self.response.assignStatus(RESPONSE_STATUS['OK'])
                    self.response.assignData(frame)

                    yield self.response
        except Exception:
            traceback.print_exc(file=sys.stdout)
            self.endCommunication()
            print('Video ended or interrupted, dropped Buffer')
Project: Farmbot_GeneralAP    Author: SpongeYao    | Project source | File source
def display_panel_mergeframe(self, arg_frame, arg_stepX, arg_stepY): 
        print('*** ', len(arg_frame.shape))
        if len(arg_frame.shape) == 3:
            tmp_frame = cv2.cvtColor(arg_frame, cv2.COLOR_BGR2RGB)
        else:
            tmp_frame = cv2.cvtColor(arg_frame, cv2.COLOR_GRAY2RGB)

        tmp_frame = cv2.resize(tmp_frame, (self.mergeframe_splitX, self.mergeframe_splitY), interpolation=cv2.INTER_LINEAR)
        begX = gui_vars.interval_x + self.mergeframe_splitX * arg_stepX
        begY = self.mergeframe_spaceY + self.mergeframe_splitY * arg_stepY
        self.mergeframe[begY:begY + self.mergeframe_splitY, begX:begX + self.mergeframe_splitX] = tmp_frame
        #begY= self.mergeframe_height- 50- self.mergeframe_splitY*arg_stepY
        #self.mergeframe[begY-self.mergeframe_splitY:begY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        self.mergeframe_stepX = arg_stepX
        self.mergeframe_stepY = arg_stepY
        print('>> mergeframe_splitY, splitX = ', self.mergeframe_splitY, ', ', self.mergeframe_splitX)
        print('>> tmp_frame.shape[0,1] = ', tmp_frame.shape[0], ', ', tmp_frame.shape[1])

        result = Image.fromarray(self.mergeframe)
        result = ImageTk.PhotoImage(result)
        self.panel_mergeframe.configure(image = result)
        self.panel_mergeframe.image = result
Project: caffeNetViewer    Author: birolkuyumcu    | Project source | File source
def runCaffeModel(self):
        iname = str(self.ui.comboBoxImage.currentText())
        self.cImg = cv2.imread(iname)
        self.cImg = cv2.cvtColor(self.cImg, cv2.COLOR_BGR2RGB)
        self.ui.plainTextEdit.appendPlainText('Model Running ... ')
        self.ui.plainTextEdit.appendPlainText('  Image Name : '+iname)
        self.ui.plainTextEdit.appendPlainText("  Image Shape : " + str(self.cImg.shape))
        self.ui.plainTextEdit.appendPlainText("  Model Input Image Shape : " + str(self.net.blobs['data'].data.shape))  

        transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2,0,1))  # move image channels to outermost dimension
        #transformer.set_mean('data', mu)            # subtract the dataset-mean value in each channel
        transformer.set_raw_scale('data', 255)      # rescale from [0, 1] to [0, 255]
        transformer.set_channel_swap('data', (2,1,0))  # swap channels from RGB to BGR          

        image = caffe.io.load_image(iname)
        inData = transformer.preprocess('data', image)

        self.net.blobs['data'].data[...] = [inData]

        self.outClass = self.net.forward()
        self.on_comboBoxLayers_currentIndexChanged()
Project: Python_SelfLearning    Author: fukuit    | Project source | File source
def facedetect(file):
    """ haar????????????????????????
    Args:
        file : ????????????
    """
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
    img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for(ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()
Project: Lifting-from-the-Deep-release    Author: DenisTome    | Project source | File source
def main():
    image = cv2.imread(IMAGE_FILE_PATH)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # estimation
    pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

    # close model
    pose_estimator.close()

    # Show 2D and 3D poses
    display_results(image, pose_2d, visibility, pose_3d)
Project: VariationalAutoEncoder    Author: despoisj    | Project source | File source
def imscatter(x, y, ax, imageData, zoom=1):
    images = []
    for i in range(len(x)):
        x0, y0 = x[i], y[i]
        # Convert to image
        img = imageData[i]*255.
        img = img.astype(np.uint8)
        # OpenCV uses BGR and plt uses RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image = OffsetImage(img, zoom=zoom)
        ab = AnnotationBbox(image, (x0, y0), xycoords='data', frameon=False)
        images.append(ax.add_artist(ab))

    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()

# Show dataset images with T-sne projection of latent space encoding
Project: pyImageClassification    Author: tyiannak    | Project source | File source
def getFeaturesFace(img):
    RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    (cascadeFrontal, cascadeProfile, storage) = initialize_face()
    facesFrontal = detect_faces(cv2.cv.fromarray(RGB), cascadeFrontal, cascadeProfile, storage)

    tempF = 0.0
    faceSizes = []
    for f in facesFrontal:
        faceSizes.append(f[2] * f[3] / float(img.shape[0] * img.shape[1]))

    F = []
    F.append(len(facesFrontal))
    if len(facesFrontal)>0:
        F.append(min(faceSizes))
        F.append(max(faceSizes))
        F.append(numpy.mean(faceSizes))
    else:
        F.extend([0, 0, 0])

    Fnames = ["Faces-Total", "Faces-minSizePer", "Faces-maxSizePer", "Faces-meanSizePer"]
    return (F, Fnames)
    #print F
    #print tempF/len(facesFrontal)
Project: saliency-salgan-2017    Author: imatge-upc    | Project source | File source
def load(self):

        if self.imageType == InputType.image:
            self.data = cv2.cvtColor(cv2.imread(self.filePath, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
            self.state = LoadState.loaded
        elif self.imageType == InputType.imageGrayscale:
            self.data = cv2.cvtColor(cv2.imread(self.filePath, cv2.IMREAD_COLOR), cv2.COLOR_BGR2GRAY)
            self.state = LoadState.loaded
        elif self.imageType == InputType.saliencyMapMatlab:
            self.data = (scipy.io.loadmat(self.filePath)['I'] * 255).astype(np.uint8)
            self.state = LoadState.loaded
        elif self.imageType == InputType.fixationMapMatlab:
            self.data = (scipy.io.loadmat(self.filePath)['I']).nonzero()
            self.state = LoadState.loaded
        elif self.imageType == InputType.empty:
            self.data = None
Project: yolov2_tensorflow    Author: biyaa    | Project source | File source
def detect(img):
    #print img
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (cfg.image_size, cfg.image_size)).astype(np.float32)

    #inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) 
    inputs = np.reshape(inputs, (1, cfg.image_size, cfg.image_size, 3))
    #inputs = np.transpose(inputs,(0,3,2,1))

    result = detect_from_cvmat(inputs)[0]

    for i in range(len(result)):
        left = (result[i][1] - result[i][3]/2)*img_w
        right = (result[i][1] + result[i][3]/2)*img_w
        top = (result[i][2] - result[i][4]/2)*img_h
        bot = (result[i][2] + result[i][4]/2)*img_h
        result[i][1] = left if left>0 else 0
        result[i][2] = right if right<img_w-1 else img_w-1
        result[i][3] = top if top>0 else 0
        result[i][4] = bot if bot<img_h-1 else img_h-1

    print "result:", result
    return result
Project: suiron    Author: kendricktan    | Project source | File source
def get_frame_prediction(self):
        ret, frame = self.cap.read()

        # if we get a frame
        if not ret:
            raise IOError('No image found!')

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
        frame = frame.astype('uint8')

        return frame


    # Normalizes inputs so we don't have to worry about weird
    # characters e.g. \r\n
Project: mxnet-yolo    Author: zhreshold    | Project source | File source
def commit(self):
        def draw(img,bboxes):
            # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            for b in bboxes:
                xmin,ymin,xmax,ymax = b[:]
                cv2.rectangle(img, (xmin,ymin),  (xmax,ymax),(255,255,0) ,thickness=2)
            return img
        def make_frame(t):
            idx = t*(self.clip.fps/self.fps)
            frm = self.clip.get_frame(t)
            height ,width = frm.shape[:2]
            for t, bboxes in self.record:
                if t == idx:
                    frm = draw(frm, bboxes)
            return frm
        new_clip = VideoClip(make_frame, duration=self.clip.duration)
        new_clip.fps = self.clip.fps
        new_clip.to_videofile(self.output_path)
Project: yolov2-tensorflow    Author: shishichang    | Project source | File source
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result
Project: deep-learning-experiments    Author: raghakot    | Project source | File source
def load_img(path, grayscale=False, target_size=None):
    """Utility function to load an image from disk.

    Args:
      path: The image file path.
      grayscale: True to convert to grayscale image (Default value = False)
      target_size: (w, h) to resize. (Default value = None)

    Returns:
        The loaded numpy image.
    """
    img = io.imread(path, grayscale)
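    # NOTE: skimage.io.imread already returns RGB, so this swap only makes sense for BGR sources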
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if target_size:
        img = cv2.resize(img, (target_size[1], target_size[0]))
    return img
Project: perception    Author: BerkeleyAutomation    | Project source | File source
def load_data(filename):
        """Loads a data matrix from a given file.

        Parameters
        ----------
        filename : :obj:`str`
            The file to load the data from. Must be one of .png, .jpg,
            .npy, or .npz.

        Returns
        -------
        :obj:`numpy.ndarray`
            The data array read from the file.
        """
        file_root, file_ext = os.path.splitext(filename)
        data = None
        if file_ext.lower() in COLOR_IMAGE_EXTS:
            data = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
        elif file_ext == '.npy':
            data = np.load(filename)
        elif file_ext == '.npz':
            data = np.load(filename)['arr_0']
        else:
            raise ValueError('Extension %s not supported' % (file_ext))
        return data