Python cv2 module: COLOR_RGB2BGR example source code

The following code examples, extracted from open-source Python projects, illustrate how to use cv2.COLOR_RGB2BGR.
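
OpenCV stores color images in BGR channel order, while PIL, scikit-image, and matplotlib use RGB, so an RGB array must be converted before it is handed to cv2 I/O or drawing routines. As a minimal, self-contained sketch of the conversion (the file name photo.jpg is a placeholder):

import cv2
import numpy as np
from PIL import Image

# PIL loads images in RGB channel order.
rgb = np.array(Image.open("photo.jpg"))

# OpenCV expects BGR, so swap the channels before any cv2 call.
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
cv2.imwrite("photo_bgr.jpg", bgr)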

Project: PixivAvatarBot    Author: kophy
def generate_avatar(dir, filename):
    """
    Generate an avatar from the image and save it as dir/avatar_filename.
    :return: whether the avatar was generated successfully (bool)
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)))
    image = None
    try:
        image = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2BGR)
    except cv2.error:
        image = numpy.array(pil_image)
    avatar = crop_avatar(image)
    if avatar is None:
        return False
    else:
        cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar)
        return True
Project: face_detection    Author: chintak
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print "no"
    if plot:
        plt.figure()
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1]
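
The im[:, :, ::-1] slice above is the NumPy equivalent of a cv2.cvtColor channel swap: both simply reverse the channel axis, which is why the function can return an RGB view of the BGR image. A small self-contained check of that equivalence:

import cv2
import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
# COLOR_RGB2BGR (like COLOR_BGR2RGB) just swaps the first and last channels.
assert np.array_equal(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), img[:, :, ::-1])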
Project: checkmymeat    Author: kendricktan
def predict(url):
    global model      
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)

    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
Project: bot2017Fin    Author: AllanYiin
def equal_color(img: Image, color):
    arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    arr_img = cv2.resize(arr_img, (img.size[0] * 10, img.size[1] * 10))
    boundaries = []
    boundaries.append(([max(color[2] - 15, 0), max(color[1] - 15, 0), max(color[0] - 15, 0)],
                       [min(color[2] + 15, 255), min(color[1] + 15, 255), min(color[0] + 15, 255)]))
    for (lower, upper) in boundaries:
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(arr_img, lower, upper)
        res = cv2.bitwise_and(arr_img, arr_img, mask=mask)
        res = cv2.resize(res, (img.size[0], img.size[1]))
        cv2_im = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        output_img = Image.fromarray(cv2_im)

        return output_img
Project: tensorlight    Author: bsautermeister
def write(filepath, image):
    """Saves an image or a frame to the specified path.
    Parameters
    ----------
    filepath: str
        The path to the file.
    image: ndarray(float/int)
        The image data.
    """
    dirpath = os.path.dirname(filepath)
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)

    if image.shape[2] == 3:
        image = cast(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    factor = 1
    if is_float_image(image):
        factor = 255

    cv2.imwrite(filepath, image * factor)
Project: EyesInTheSky    Author: SherineSameh
def forward(self, rgbImg):
        """
        Perform a forward network pass of an RGB image.

        :param rgbImg: RGB image to process. Shape: (imgDim, imgDim, 3)
        :type rgbImg: numpy.ndarray
        :return: Vector of features extracted from the neural network.
        :rtype: numpy.ndarray
        """
        assert rgbImg is not None

        t = '/tmp/openface-torchwrap-{}.png'.format(
            binascii.b2a_hex(os.urandom(8)))
        bgrImg = cv2.cvtColor(rgbImg, cv2.COLOR_RGB2BGR)
        cv2.imwrite(t, bgrImg)
        rep = self.forwardPath(t)
        os.remove(t)
        return rep
Project: ssd_tensorflow    Author: seann999
def draw_outputs(img, boxes, confidences, wait=1):
    I = img * 255.0

    #nms = non_max_suppression_fast(np.asarray(filtered_boxes), 1.00)
    picks = postprocess_boxes(boxes, confidences)

    for box, conf, top_label in picks:#[filtered[i] for i in picks]:
        if top_label != classes:
            #print("%f: %s %s" % (conf, coco.i2name[top_label], box))

            c = colorsys.hsv_to_rgb(((top_label * 17) % 255) / 255.0, 1.0, 1.0)
            c = tuple([255*c[i] for i in range(3)])

            draw_ann(I, box, i2name[top_label], color=c, confidence=conf)

    I = cv2.cvtColor(I.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imshow("outputs", I)
    cv2.waitKey(wait)
Project: textboxes    Author: shinjayne
def draw_outputs(img, boxes, confidences, wait=1):
    I = img * 255.0

    #nms = non_max_suppression_fast(np.asarray(filtered_boxes), 1.00)
    picks = postprocess_boxes(boxes, confidences)

    for box, conf, top_label in picks:#[filtered[i] for i in picks]:
        if top_label != classes:
            #print("%f: %s %s" % (conf, coco.i2name[top_label], box))

            c = colorsys.hsv_to_rgb(((top_label * 17) % 255) / 255.0, 1.0, 1.0)
            c = tuple([255*c[i] for i in range(3)])

    I = cv2.cvtColor(I.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imshow("outputs", I)
    cv2.waitKey(wait)
Project: fatego-auto    Author: lishunan246
def __init__(self):
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if self.have('topleft'):
            tl = self._imageLoader.get('topleft')
            res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)

            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x1, y1 = max_loc
            rd = self._imageLoader.get('rightdown')
            res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x2, y2 = max_loc
            # default 989
            GameStatus().y = y2 - y1
            GameStatus().use_Droid4X = True
Project: img_classifier_prepare    Author: zonekey
def post(self):
    '''  curl --request POST --data-binary "@fname.jpg" --header "Content-Type: image/jpg"  http://localhost:8899/pic
         Classify the posted image and return the top three predictions ...
        '''
        global cf,lock

        body = self.request.body
        try:
            img = Image.open(StringIO.StringIO(body))
            img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR)
            lock.acquire()
            pred = cf.predicate(img)
            lock.release()
            rx = { "result": [] }
            for i in range(0, 3):
                r = { 'title': pred[i][1], 'score': float(pred[i][2]) }
                rx['result'].append(r)
            self.finish(rx)
        except Exception as e:
            print(e)
            self.finish(str(e))
Project: osrmacro    Author: jjvilm
def shoot(x1,y1,x2,y2, *args, **kwargs):
    """Takes screenshot at given coordinates as PIL image format, the converts to cv2 grayscale image format and returns it"""
    # creates widht & height for screenshot region
    w = x2 - x1
    h = y2 - y1
    # PIL format as RGB
    img = pyautogui.screenshot(region=(x1,y1,w,h)) #X1,Y1,X2,Y2
    #im.save('screenshot.png')

    # Converts to an array used for OpenCV
    img = np.array(img)

    try:
        for arg in args:
            if arg == 'hsv':
                # Converts to BGR format for OpenCV
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                return hsv_img

            if arg == 'rgb':
                rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                return rgb_img
    except:
        pass

    cv_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv_gray
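
A usage sketch for the helper above (the coordinates are placeholders). Note that despite its name, the 'rgb' branch returns a BGR array, since it applies COLOR_RGB2BGR to the RGB screenshot:

# Grayscale capture of a 200x200 region at the origin (the default path).
gray = shoot(0, 0, 200, 200)
# HSV capture of the same region.
hsv = shoot(0, 0, 200, 200, 'hsv')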
Project: pybot    Author: spillai
def decode_rgb(self, data): 
        w, h = data.image.width, data.image.height
        if data.image.image_data_format == self.image_msg_t_.VIDEO_RGB_JPEG: 
            img = cv2.imdecode(np.asarray(bytearray(data.image.image_data), dtype=np.uint8), -1)
            bgr = img.reshape((h,w,3))[::self.skip, ::self.skip, :]             
        else: 
            img = np.fromstring(data.image.image_data, dtype=np.uint8)
            rgb = img.reshape((h,w,3))[::self.skip, ::self.skip, :] 
            bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        if not self.bgr: 
            return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        else: 
            return bgr
Project: pybot    Author: spillai
def _process_items(self, index, rgb_im, depth_im, instance, label, bbox, pose): 
        # print 'Processing pose', pose, bbox


        # def _process_bbox(bbox): 
        #     return dict(category=bbox['category'], target=UWRGBDDataset.target_hash[str(bbox['category'])], 
        #                 left=bbox.coords['left'], right=bbox['right'], top=bbox['top'], bottom=bbox['bottom'])

        # # Compute bbox from pose and map (v2 support)
        # if self.version == 'v1': 
        #     if bbox is not None: 
        #         bbox = [_process_bbox(bb) for bb in bbox]
        #         bbox = filter(lambda bb: bb['target'] in UWRGBDDataset.train_ids_set, bbox)

        # if self.version == 'v2': 
        #     if bbox is None and hasattr(self, 'map_info'): 
        #         bbox = self.get_bboxes(pose)

        # print 'Processing pose', pose, bbox

        rgb_im = np.swapaxes(rgb_im, 0, 2)
        rgb_im = cv2.cvtColor(rgb_im, cv2.COLOR_RGB2BGR)

        depth_im = np.swapaxes(depth_im, 0, 1) * 1000
        instance = np.swapaxes(instance, 0, 1)
        label = np.swapaxes(label, 0, 1)

        return AttrDict(index=index, img=rgb_im, depth=depth_im, instance=instance, 
                        label=label, bbox=bbox if bbox is not None else [], pose=pose)
Project: pybot    Author: spillai
def to_color(im, flip_rb=False): 
    if im.ndim == 2: 
        return cv2.cvtColor(im, cv2.COLOR_GRAY2RGB if flip_rb else cv2.COLOR_GRAY2BGR)
    else: 
        return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) if flip_rb else im.copy()
Project: spikefuel    Author: duguyue100
def read_video(v_name):
    """A workaround function for reading video.

    Apparently precompiled OpenCV couldn't read AVI videos on Mac OS X
    and Linux,
    therefore I use PyAV, a ffmpeg binding to extract video frames

    Parameters
    ----------
    v_name : string
        absolute path to video

    Returns
    -------
    frames : list
        An ordered list for storing frames
    num_frames : int
        number of frames in the video
    """
    container = av.open(v_name)
    video = next(s for s in container.streams if s.type == b'video')

    frames = []
    for packet in container.demux(video):
        for frame in packet.decode():
            frame_t = np.array(frame.to_image())
            frames.append(cv2.cvtColor(frame_t, cv2.COLOR_RGB2BGR))

    return frames, len(frames)
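
A usage sketch (the video path is a placeholder); each returned frame is already in OpenCV's BGR order, ready for cv2.imshow or cv2.imwrite:

frames, num_frames = read_video("/path/to/video.avi")
print("decoded {} frames".format(num_frames))
if num_frames > 0:
    cv2.imwrite("first_frame.png", frames[0])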
Project: DeblurGAN    Author: KupynOrest
def blur_image(self, save=False, show=False):
        if self.part is None:
            psf = self.PSFs
        else:
            psf = [self.PSFs[self.part]]
        yN, xN, channel = self.shape
        key, kex = self.PSFs[0].shape
        delta = yN - key
        assert delta >= 0, 'resolution of image should be higher than kernel'
        result = []
        if len(psf) > 1:
            for p in psf:
                tmp = np.pad(p, delta // 2, 'constant')
                cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                # blured = np.zeros(self.shape)
                blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_32F)
                blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
                blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
                blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
                blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
                blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
                result.append(np.abs(blured))
        else:
            psf = psf[0]
            tmp = np.pad(psf, delta // 2, 'constant')
            cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_32F)
            blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
            blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
            blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
            blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
            result.append(np.abs(blured))
        self.result = result
        if show or save:
            self.__plot_canvas(show, save)
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: u1234x1234
def get_rgb_image(img_id, h=None, w=None):
    image = get_rgb_data(img_id)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    for c in range(3):
        min_val, max_val = np.percentile(image[:, :, c], [2, 98])
        image[:, :, c] = 255*(image[:, :, c] - min_val) / (max_val - min_val)
        image[:, :, c] = np.clip(image[:, :, c], 0, 255)
    image = image.astype(np.uint8)
    if h and w:
        image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
    return image
Project: face_detection    Author: chintak
def get_scaled_translated_img_bb(self, name, bb):
        im = imread(name)
        img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
        h0, w0, _ = self.img_size
        wc, hc = (bb[0] + bb[2]) / 2, (bb[1] + bb[3]) / 2
        face_width = (bb[3] - bb[1]) / 2
        # Old approach: scale and then translate
        res, new_face_width, shc, swc = self.compute_scale_factor(
            img, face_width, hc, wc)
        thc, twc = self.compute_translation(new_face_width, shc, swc)
        # New approach: translate and then scale
        # thc, twc = self.compute_translation(face_width, hc, wc,
        #                                     min_pad=self.MIN_FACE_SIZE + 10)
        # high_scale = np.min([thc - 5, h0 - thc - 5, twc - 5, w0 - twc - 5])
        # res, new_face_width, shc, swc = self.compute_scale_factor(
        #     img, face_width, hc, wc,
        #     high_scale=high_scale, low_scale=None)
        out_bgr, new_bb = self.copy_source_to_target(res, new_face_width,
                                                     shc, swc, thc, twc)

        log = "%.1f,%.1f,%.0f\n" % (
            (new_bb[1] + new_bb[3]) / 2, (new_bb[0] + new_bb[2]) / 2, new_face_width * 2)
        with open('aug.csv', mode='a', buffering=0) as f:
            f.write(log)
        # cv2.rectangle(out_bgr, (int(new_bb[0]), int(new_bb[1])), (int(new_bb[2]), int(new_bb[3])),
        #               (255, 255, 0), thickness=4)
        # cv2.imwrite("%d.jpg" % os.getpid(), out_bgr)
        # sys.exit(0)
        out = cv2.cvtColor(out_bgr, cv2.COLOR_BGR2RGB)
        return out, new_bb
Project: tensorflow-action-conditional-video-prediction    Author: williamd4112
def main(args):
    from tfacvp.model import ActionConditionalVideoPredictionModel
    from tfacvp.util import post_process_rgb

    with tf.Graph().as_default() as graph:    
        logging.info('Create model [num_act = %d] for testing' % (args.num_act))
        model = ActionConditionalVideoPredictionModel(num_act=args.num_act, is_train=False)

        config = get_config(args)
        s = np.load(args.data)
        mean = np.load(args.mean)
        scale = 255.0

        with tf.Session(config=config) as sess:
            logging.info('Loading weights from %s' % (args.load))
            model.restore(sess, args.load)

            for i in range(args.num_act):
                logging.info('Predict next frame condition on action %d' % (i))
                a = np.identity(args.num_act)[i]
                x_t_1_pred_batch = model.predict(sess, s[np.newaxis, :], a[np.newaxis, :])[0]

                img = x_t_1_pred_batch[0]
                img = post_process(img, mean, scale)
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imwrite('pred-%02d.png' % i, img)
Project: rl-attack-detection    Author: yenchenlin
def main(args):
    from tfacvp.model import ActionConditionalVideoPredictionModel
    from tfacvp.util import post_process_rgb

    with tf.Graph().as_default() as graph:    
        logging.info('Create model [num_act = %d] for testing' % (args.num_act))
        model = ActionConditionalVideoPredictionModel(num_act=args.num_act, is_train=False)

        config = get_config(args)
        s = np.load(args.data)
        mean = np.load(args.mean)
        scale = 255.0

        with tf.Session(config=config) as sess:
            logging.info('Loading weights from %s' % (args.load))
            model.restore(sess, args.load)

            for i in range(args.num_act):
                logging.info('Predict next frame condition on action %d' % (i))
                a = np.identity(args.num_act)[i]
                x_t_1_pred_batch = model.predict(sess, s[np.newaxis, :], a[np.newaxis, :])[0]

                img = x_t_1_pred_batch[0]
                img = post_process(img, mean, scale)
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imwrite('pred-%02d.png' % i, img)
Project: pycaffe-yolo    Author: Zehaos
def main(argv):
    model_filename = ''
    weight_filename = ''
    img_filename = ''
    try:
        opts, args = getopt.getopt(argv, "hm:w:i:")
        print(opts)
    except getopt.GetoptError:
        print('yolo_main.py -m <model_file> -w <weight_file> -i <img_file>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('yolo_main.py -m <model_file> -w <weight_file> -i <img_file>')
            sys.exit()
        elif opt == "-m":
            model_filename = arg
        elif opt == "-w":
            weight_filename = arg
        elif opt == "-i":
            img_filename = arg
    print('model file is "%s"' % model_filename)
    print('weight file is "%s"' % weight_filename)
    print('image file is "%s"' % img_filename)

    caffe.set_device(0)
    caffe.set_mode_gpu()
    net = caffe.Net(model_filename, weight_filename, caffe.TEST)
    img = caffe.io.load_image(img_filename)  # load the image using caffe io
    img_ = scipy.misc.imresize(img, (448, 448))
    transformer = SimpleTransformer([104.00699, 116.66877, 122.67892])
    input = transformer.preprocess(img_)
    out = net.forward_all(data=input)
    print(out.items())
    img_cv = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    results = interpret_output(out['result'][0], img.shape[1], img.shape[0])  # fc27 instead of fc12 for yolo_small
    show_results(img_cv, results, img.shape[1], img.shape[0])
    cv2.waitKey(0)
Project: iGAN    Author: junyanz
def save_constraints(self):
        [im_c, mask_c, im_e, mask_e] = self.combine_constraints(self.constraints)
        # write image
        # im_c2 = cv2.cvtColor(im_c, cv2.COLOR_RGB2BGR)
        # cv2.imwrite('input_color_image.png', im_c2)
        # cv2.imwrite('input_color_mask.png', mask_c)
        # cv2.imwrite('input_edge_map.png', im_e)
        self.prev_im_c = im_c.copy()
        self.prev_mask_c = mask_c.copy()
        self.prev_im_e = im_e.copy()
        self.prev_mask_e = mask_e.copy()
Project: iGAN    Author: junyanz
def rec_test(test_data, n_epochs=0, batch_size=128, output_dir=None):

    print('computing reconstruction loss on test images')
    rec_imgs = []
    imgs = []
    costs = []
    ntest = len(test_data)

    for n in tqdm(range(ntest // batch_size)):
        imb = test_data[n*batch_size:(n+1)*batch_size, ...]
        # imb = train_dcgan_utils.transform(xmb, nc=3)
        [cost, gx] = _train_p_cost(imb)
        costs.append(cost)
        ntest = ntest + 1
        if n == 0:
            utils.print_numpy(imb)
            utils.print_numpy(gx)
            imgs.append(train_dcgan_utils.inverse_transform(imb, npx=npx, nc=nc))
            rec_imgs.append(train_dcgan_utils.inverse_transform(gx, npx=npx, nc=nc))

    mean_cost = np.mean(costs)

    if output_dir is not None:
        # st()
        save_samples = np.hstack(np.concatenate(imgs, axis=0))
        save_recs = np.hstack(np.concatenate(rec_imgs, axis=0))
        save_comp = np.vstack([save_samples, save_recs])

        txt = 'epoch = %3.3d, cost = %3.3f' % (n_epochs, mean_cost)

        width = save_comp.shape[1]
        save_f = (save_comp*255).astype(np.uint8)
        html.save_image([save_f], [''], header=txt, width=width, cvt=True)
        html.save()
        save_cvt = cv2.cvtColor(save_f, cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(rec_dir, 'rec_epoch_%5.5d.png'%n_epochs), save_cvt)

    return mean_cost
Project: checkmymeat    Author: kendricktan
def predict(url):
    global model, COOKED_PHRASES, RAW_PHRASES   
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    # Get features
    features = describe(image, mask)

    # Predict it
    result = model.predict([features])
    probability = model.predict_proba([features])[0][result][0]        
    state = le.inverse_transform(result)[0]

    phrase = ''

    if 'cook' in state:
        phrase = COOKED_PHRASES[int(random.random()*len(COOKED_PHRASES))]
    elif 'raw' in state:
        phrase = RAW_PHRASES[int(random.random()*len(RAW_PHRASES))]

    return {'type': state, 'confidence': probability, 'phrase': phrase}
Project: focal-loss    Author: unsky
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Project: Yugioh-bot    Author: will7200
def __init__(self, query, x=0, y=0):
        self.query = query
        self.xThreshold = x
        self.yThreshold = y
        if type(query) is Pillow.Image.Image:
            self.query = cv2.cvtColor(np.array(self.query), cv2.COLOR_RGB2BGR)
        elif type(query) is np.ndarray:
            self.query = query
        else:
            self.query = cv2.imread(query, 0)
        self.goodMatches = []
        self.images = []
        self.circlePoints = []
        self.kmeans = None
        self.white_query = None
Project: neural_style_synthesizer    Author: dwango
def convert_video(self, video_path, output_directory, skip=0, resize=400):
        video = cv2.VideoCapture(video_path)
        video_output = None
        i = 0
        img_init = None
        while video.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO) < 1.0:
            i += 1
            for _ in range(skip+1):
                status, bgr_img = video.read()
            img = PIL.Image.fromarray(cv2.cvtColor(
                bgr_img,
                cv2.COLOR_BGR2RGB
            ))
            img = neural_art.utility.resize_img(img, resize)
            if video_output is None:
                video_output = cv2.VideoWriter(
                    "{}/out.avi".format(output_directory),
                    fourcc=0, #raw
                    fps=video.get(cv2.cv.CV_CAP_PROP_FPS) / (skip + 1),
                    frameSize=img.size,
                    isColor=True
                )
                if not video_output.isOpened():
                    raise Exception("Cannot Open VideoWriter")
            if img_init is None:
                img_init = img
            converted_img = self.frame_converter.convert(img, init_img=img_init, iteration=self.iteration)
            converted_img.save("{}/converted_{:05d}.png".format(output_directory, i))
            img_init = converted_img
            video_output.write(cv2.cvtColor(
                numpy.asarray(converted_img),
                cv2.COLOR_RGB2BGR
            ))
        video_output.release()
Project: DeepLearning    Author: Wanwannodao
def main(_):
    loader = Loader(FLAGS.data_dir, FLAGS.data, FLAGS.batch_size)
    print("# of data: {}".format(loader.data_num))
    with tf.Session() as sess:                                
        lsgan = LSGAN([FLAGS.batch_size, 112, 112, 3])
        sess.run(tf.global_variables_initializer())

        for epoch in range(10000):
            loader.reset()

            for step in range(int(loader.batch_num/FLAGS.d)):
                if step == 0 and epoch % 100 == 0:
                    utils.visualize(sess.run(lsgan.gen_img), epoch)

                for _ in range(FLAGS.d):
                    batch = np.asarray(loader.next_batch(), dtype=np.float32)
                    batch = (batch-127.5) / 127.5
                    #print("{}".format(batch.shape))
                    feed={lsgan.X: batch}
                    _ = sess.run(lsgan.d_train_op, feed_dict=feed)
                        #utils.visualize(batch, (epoch+1)*100)

                #cv2.namedWindow("window")
                #cv2.imshow("window", cv2.cvtColor(batch[0], cv2.COLOR_RGB2BGR))
                #cv2.waitKey(0)
                #cv2.destroyAllWindows()

                _ = sess.run(lsgan.g_train_op)
Project: Deformable-ConvNets    Author: msracver
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Project: Deep-Feature-Flow    Author: msracver
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Project: rl_3d    Author: avdmitry
def Observation(self):
        obs = self.env.observations()
        img = obs["RGB_INTERLACED"]
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return img
Project: TFFRCNN    Author: InterVideo
def imread_from_base64(base64_str):
    sbuf = StringIO()
    sbuf.write(base64.b64decode(base64_str))
    pimg = Image.open(sbuf)
    return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
Project: dataArtist    Author: radjkarl
def exportCV2(self):
        '''
        Use cv2.imwrite() to save the image array
        '''
        w = self.display.widget

        def fn(path, img):
            r = self.pRange.value()
            if r == '0-max':
                r = (0, w.levelMax)
            elif r == 'min-max':
                r = (w.levelMin, w.levelMax)
            else:  # 'current'
                r = w.ui.histogram.getLevels()
            int_img = toUIntArray(img,
                                  # cutNegative=self.pCutNegativeValues.value(),
                                  cutHigh=~self.pStretchValues.value(),
                                  range=r,
                                  dtype={'8 bit': np.uint8,
                                         '16 bit': np.uint16}[
                                      self.pDType.value()])
            if isColor(int_img):
                int_img = cv2.cvtColor(int_img, cv2.COLOR_RGB2BGR)

            cv2.imwrite(path, int_img)

        return self._export(fn)
Project: ssd_pytorch    Author: miraclebiu
def draw_all_detection(im, detections, class_names, scale=1.0):
    """
    visualize all detections in one image
    :param im: [h w c] image, already in bgr
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    # im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    #im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            # if score < threshold:
            #     continue
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Project: DQN    Author: boluoweifenda
def observe(self):
        if self.show:
            cv2.imshow("show", cv2.cvtColor(self.env.getScreenRGB(),cv2.COLOR_RGB2BGR))
            cv2.waitKey(self.delay)
        return cv2.resize(self.env.getScreenGrayscale(), (self.width, self.height), interpolation=cv2.INTER_LINEAR)
            # return (cv2.resize(cv2.cvtColor(self.env.getScreenRGB(),cv2.COLOR_BGR2YUV)[:,:,0], (self.width, self.height) , interpolation=cv2.INTER_LINEAR)) #/ np.float32(255)
Project: esper    Author: scanner-research
def make_thumbnail(video, db):
    indices = [int(n * video.num_frames) for n in [0.1, 0.35, 0.60, 0.85]]
    table = db.table(video.path)
    frames = [f[0] for _, f in table.load([1], rows=indices)]
    img = make_montage(len(frames), iter(frames), frame_width=150, frames_per_row=2)
    run('mkdir -p assets/thumbnails')
    cv2.imwrite('assets/thumbnails/{}.jpg'.format(video.id), cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
Project: esper    Author: scanner-research
def execute(self, columns):
        global i
        [img, bboxes] = columns
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        [h, w] = img.shape[:2]
        bboxes = parsers.bboxes(bboxes, self.protobufs)
        imgs = [img[int(h*bbox.y1):int(h*bbox.y2), int(w*bbox.x1):int(w*bbox.x2)]
                for bbox in bboxes]
        for img in imgs:
            cv2.imwrite('/app/tmp/{:05d}.jpg'.format(i), img)
            i += 1
        genders = self.rc.get_gender_batch(imgs)
        outputs = [struct.pack('=cf', label, score) for [label, score] in genders]
        assert(len(outputs) == len(imgs))
        return [''.join(outputs)]
Project: HappyNet    Author: danduncan
def faceCrop(targetDir, imgList, color, single_face):
    # Load list of Haar cascades for faces
    faceCascades = load_cascades()

    # Iterate through images
    face_list = []
    for img in imgList:
        if os.path.isdir(img):
            continue
        pil_img = Image.open(img)
        if color:
            cv_img  = cv.cvtColor(np.array(pil_img), cv.COLOR_RGB2BGR)
        else:
            cv_img = np.array(pil_img)
            # Convert to grayscale if this image is actually color
            if cv_img.ndim == 3:
                cv_img = cv.cvtColor(np.array(pil_img), cv.COLOR_BGR2GRAY)

        # Detect all faces in this image
        scaled_img, faces = DetectFace(cv_img, color, faceCascades, single_face, second_pass=False, draw_rects=False)

        # Iterate through faces
        n = 1
        for face in faces:
            cropped_cv_img = imgCrop(scaled_img, face, scale=1.0)
            if color:
                cropped_cv_img = rgb(cropped_cv_img)
            fname, ext = os.path.splitext(img)
            cropped_pil_img = Image.fromarray(cropped_cv_img)
            #save_name = loc + '/cropped/' + fname.split('/')[-1] + '_crop' + str(n) + ext
            save_name = targetDir + '/' + fname.split('/')[-1] + '_crop' + str(n) + ext
            cropped_pil_img.save(save_name)
            face_list.append(save_name)
            n += 1

    return face_list

# Add an emoji to an image at a specified point and size
# Inputs: img, emoji are ndarrays of WxHx3
#         faces is a list of (x,y,w,h) tuples for each face to be replaced
Project: faststyle    Author: ghwatson
def imwrite(path, img):
    """Wrapper around cv2.imwrite. Switches it to RGB input convention.

    :param path:
        String indicating path to save image to.
    :param img:
        3D RGB numpy array of image.
    """
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imwrite(path, img)
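
A usage sketch for the wrapper above, with a synthetic RGB array:

img = np.zeros((64, 64, 3), dtype=np.uint8)
img[:, :, 0] = 255  # pure red in RGB convention
imwrite('red.png', img)  # saved with correct colors because the wrapper converts to BGR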
Project: slide-transition-detector    Author: brene
def convert_to_opencv(img):
    return cv2.cvtColor(numpy.array(img.convert('RGB')), cv2.COLOR_RGB2BGR)
Project: citysim3d    Author: alexlee-gk
def main():
    # actions are translation and angular speed (angular velocity constrained to the (0, 0, 1) axis)
    action_space = TranslationAxisAngleSpace(low=[-10, -10, -10, -np.pi/4],
                                             high=[10, 10, 10, np.pi/4],
                                             axis=[0, 0, 1])
    env = SimpleQuadPanda3dEnv(action_space, sensor_names=['image', 'depth_image'])

    num_trajs = 10
    num_steps = 100
    done = False
    for traj_iter in range(num_trajs):
        env.reset()
        for step_iter in range(num_steps):
            action = action_space.sample()
            obs, _, _, _ = env.step(action)
            image, depth_image = obs['image'], obs['depth_image']

            # convert RGB image to BGR for OpenCV display
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imshow("image", image)

            # rescale depth image to be between 0 and 255
            depth_scale = depth_image.max() - depth_image.min()
            depth_offset = depth_image.min()
            depth_image = np.clip((depth_image - depth_offset) / depth_scale, 0.0, 1.0)
            depth_image = (255.0 * depth_image).astype(np.uint8)
            cv2.imshow("depth image", depth_image)

            env.render()

            key = cv2.waitKey(10)
            key &= 255
            if key == 27 or key == ord('q'):
                print("Pressed ESC or q, exiting")
                done = True

            if done:
                break
        if done:
            break
Project: ssd_tensorflow    Author: seann999
def draw_matches(I, boxes, matches, anns):
    I = np.copy(I) * 255.0

    for o in range(len(layer_boxes)):
        for y in range(c.out_shapes[o][2]):
            for x in range(c.out_shapes[o][1]):
                for i in range(layer_boxes[o]):
                    match = matches[o][x][y][i]

                    # None if not positive nor negative
                    # -1 if negative
                    # ground truth indices if positive

                    if match == -1:
                        coords = center2cornerbox(boxes[o][x][y][i])
                        draw_rect(I, coords, (255, 0, 0))
                    elif isinstance(match, tuple):
                        coords = center2cornerbox(boxes[o][x][y][i])
                        draw_rect(I, coords, (0, 0, 255))
                        # elif s == 2:
                        #    draw_rect(I, boxes[o][x][y][i], (0, 0, 255), 2)

    for gt_box, id in anns:
        draw_rect(I, gt_box, (0, 255, 0), 3)
        cv2.putText(I, i2name[id], (int(gt_box[0] * image_size), int((gt_box[1] + gt_box[3]) * image_size)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))

    I = cv2.cvtColor(I.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imshow("matches", I)
    cv2.waitKey(1)
Project: ssd_tensorflow    Author: seann999
def draw_matches2(I, pos, neg, true_labels, true_locs):
    I = np.copy(I) * 255.0
    index = 0

    for o in range(len(layer_boxes)):
        for y in range(c.out_shapes[o][2]):
            for x in range(c.out_shapes[o][1]):
                for i in range(layer_boxes[o]):
                    if pos[index] > 0:
                        d = c.defaults[o][x][y][i]
                        coords = default2cornerbox(d, true_locs[index])
                        draw_rect(I, coords, (0, 255, 0))
                        coords = center2cornerbox(d)
                        draw_rect(I, coords, (0, 0, 255))
                        cv2.putText(I, i2name[true_labels[index]],
                                    (int(coords[0] * image_size), int((coords[1] + coords[3]) * image_size)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                    elif neg[index] > 0:
                        pass
                        #d = defaults[o][x][y][i]
                        #coords = default2global(d, pred_locs[index])
                        #draw_rect(I, coords, (255, 0, 0))
                        #cv2.putText(I, coco.i2name[true_labels[index]],
                        #            (int(coords[0] * image_size), int((coords[1] + coords[3]) * image_size)),
                        #            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0))

                    index += 1

    I = cv2.cvtColor(I.astype(np.uint8), cv2.COLOR_RGB2BGR)
    cv2.imshow("matches2", I)
    cv2.waitKey(1)
Project: Farmbot_GeneralAP    Author: SpongeYao
def saveImg_function(self, arg_frame,arg_savePath, arg_filename):
        utils_tool.check_path(arg_savePath)
        # make sure output dir exists
        #if(not path.isdir(arg_savePath)):
        #    makedirs(arg_savePath)
        #tmp= cv2.cvtColor(arg_frame, cv2.COLOR_RGB2BGR)
        cv2.imwrite(arg_savePath+arg_filename+'.jpg',arg_frame)
Project: jenova    Author: dungba88
def read_base64(base64_string):
    """read an image from base64 string"""
    sbuf = BytesIO()
    sbuf.write(base64.b64decode(base64_string))
    pimg = Image.open(sbuf)
    return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
Project: saliency-salgan-2017    Author: imatge-upc
def train():
    """
    Train both generator and discriminator
    :return:
    """
    # Load data
    print('Loading training data...')
    with open('../saliency-2016-lsun/validationSample240x320.pkl', 'rb') as f:
    # with open(TRAIN_DATA_DIR, 'rb') as f:
        train_data = pickle.load(f)
    print('-->done!')

    print('Loading validation data...')
    with open('../saliency-2016-lsun/validationSample240x320.pkl', 'rb') as f:
    # with open(VALIDATION_DATA_DIR, 'rb') as f:
        validation_data = pickle.load(f)
    print('-->done!')

    # Choose a random sample to monitor the training
    num_random = random.choice(range(len(validation_data)))
    validation_sample = validation_data[num_random]
    cv2.imwrite('./' + DIR_TO_SAVE + '/validationRandomSaliencyGT.png', validation_sample.saliency.data)
    cv2.imwrite('./' + DIR_TO_SAVE + '/validationRandomImage.png', cv2.cvtColor(validation_sample.image.data,
                                                                                cv2.COLOR_RGB2BGR))

    # Create network

    if flag == 'salgan':
        model = ModelSALGAN(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        # load_weights(net=model.net['output'], path="nss/gen_", epochtoload=15)
        # load_weights(net=model.discriminator['prob'], path="test_dialted/disrim_", epochtoload=54)
        salgan_batch_iterator(model, train_data, validation_sample.image.data)

    elif flag == 'bce':
        model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        # load_weights(net=model.net['output'], path='test/gen_', epochtoload=15)
        bce_batch_iterator(model, train_data, validation_sample.image.data)
    else:
        print "Invalid input argument."