Python cv2 module: COLOR_HSV2BGR example source code

We have extracted the following 42 code examples from open-source Python projects to illustrate how to use cv2.COLOR_HSV2BGR.
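
Before the per-project examples, here is a minimal, self-contained sketch of the round trip (BGR -> HSV -> BGR) that most of the snippets below perform. The synthetic gradient image and variable names are illustrative assumptions only, not code from any of the listed projects.

import cv2
import numpy as np

# Build a small synthetic BGR image (uint8) purely for illustration.
bgr = np.zeros((64, 256, 3), dtype=np.uint8)
bgr[..., 0] = np.arange(256, dtype=np.uint8)   # horizontal blue gradient

# Convert to HSV (for uint8 images H is in [0, 180), S and V in [0, 255]),
# modify a channel, then convert back with cv2.COLOR_HSV2BGR.
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
hsv[..., 1] = 255                              # force full saturation
bgr_back = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

print(bgr_back.shape, bgr_back.dtype)          # (64, 256, 3) uint8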

Project: pybot    Author: spillai    | Project source code | File source code
def colormap(im, min_threshold=0.01):
    mask = im<min_threshold
    if im.ndim == 1: 
        print(im)
        hsv = np.zeros((len(im), 3), dtype=np.uint8)
        hsv[:,0] = (im * 180).astype(np.uint8)
        hsv[:,1] = 255
        hsv[:,2] = 255
        bgr = cv2.cvtColor(hsv.reshape(-1,1,3), cv2.COLOR_HSV2BGR).reshape(-1,3)
        bgr[mask] = 0
    else: 
        hsv = np.zeros((im.shape[0], im.shape[1], 3), np.uint8)
        hsv[...,0] = (im * 180).astype(np.uint8)
        hsv[...,1] = 255
        hsv[...,2] = 255
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        bgr[mask] = 0
    return bgr
Project: Tensormodels    Author: asheshjain399    | Project source code | File source code
def random_saturation(img, label, lower=0.5, upper=1.5):
    """
    Multiplies saturation by a random constant and clips the result to [0, 1.0].
    Args:
        img: input image in float32
        label: label, returned unchanged
        lower: lower bound for sampling the scale factor
        upper: upper bound for sampling the scale factor
    """
    alpha = lower + (upper - lower) * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # saturation should always be within [0,1.0]
    hsv[:, :, 1] = np.clip(alpha * hsv[:, :, 1], 0.0, 1.0)

    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
Project: Tensormodels    Author: asheshjain399    | Project source code | File source code
def random_hue(img, label, max_delta=10):
    """
    Rotates the hue channel
    Args:
        img: input image in float32
        max_delta: Max number of degrees to rotate the hue channel
    """
    # Rotates the hue channel by delta degrees
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hchannel = hsv[:, :, 0]
    hchannel = delta + hchannel

    # hue should always be within [0,360]
    idx = np.where(hchannel > 360)
    hchannel[idx] = hchannel[idx] - 360
    idx = np.where(hchannel < 0)
    hchannel[idx] = hchannel[idx] + 360

    hsv[:, :, 0] = hchannel
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
Project: VerySharp    Author: wilecoyote2015    | Project source code | File source code
def writeOpticalFlowImage(self, index, optical_flow):
        filename = "flow_" + str(index) + ".png"
        output_path = os.path.join(self.optical_flow_output_directory, filename)

        # create hsv image
        shape_optical_flow = optical_flow.shape[:-1]
        shape_hsv = [shape_optical_flow[0], shape_optical_flow[1], 3]
        hsv = np.zeros(shape_hsv, np.float32)

        # set saturation to 255
        hsv[:,:,1] = 255

        # create colorful illustration of optical flow
        mag, ang = cv2.cartToPolar(optical_flow[:,:,0], optical_flow[:,:,1])
        hsv[:,:,0] = ang*180/np.pi/2
        hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
        bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)

        cv2.imwrite(output_path, bgr)
Project: unet-tensorflow    Author: timctho    | Project source code | File source code
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: unet-tensorflow    Author: timctho    | Project source code | File source code
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: colorcs    Author: ch3njust1n    | Project source code | File source code
def check_split(self):
        # from copy import deepcopy
        # h = deepcopy(self.h)
        # s = deepcopy(self.s)
        # v = deepcopy(self.v)

        if not os.path.exists(self.output_path + 'check_merge/'):
            os.makedirs(self.output_path + 'check_merge/')

        merged = cv2.merge((self.h, self.s, self.v))
        cv2.imshow('hsv-remerged', merged)
        cv2.imwrite(self.output_path + 'check_merge/hsv-merged.jpg', merged)

        # Try to merge 3 noisy hsv channels into 1 noisy image
        merged2 = cv2.merge((self.n_h, self.n_s, self.n_v))
        cv2.imshow('hsv-noisy-remerged', merged2)

        rgb = cv2.cvtColor(merged, cv2.COLOR_HSV2BGR)
        cv2.imshow('rgb-remerged', rgb)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Project: head-segmentation    Author: szywind    | Project source code | File source code
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: Kaggle-Carvana-Image-Masking-Challenge    Author: petrosgk    | Project source code | File source code
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: TableSoccerCV    Author: StudentCV    | Project source code | File source code
def show_video(self, frame, get_source_var, draw=[]):
        """

        :param frame: HSV-image
        :param get_source_var:
        :param draw:
        :return:
        """
        frame_time = get_source_var('FrameTime')
        self.total_frame_time = self.total_frame_time + frame_time

        #print(1/frame_time)

        if self.total_frame_time >= (1/30):

            if 0 != draw:
                for task in draw:
                    frame = task(frame)

            cv2.imshow('Soccer', cv2.cvtColor(frame, cv2.COLOR_HSV2BGR))
            cv2.waitKey(1)
            self.total_frame_time = 0
        else:
            return
Project: TableSoccerCV    Author: StudentCV    | Project source code | File source code
def FindSkeleton(self):

        rgb = cv2.cvtColor(self.ImgHSV, cv2.COLOR_HSV2BGR)
        angle = 0
        count = 0

        gray = cv2.cvtColor(cv2.cvtColor(self.ImgHSV,cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray,50,150,apertureSize = 3)

        lines = cv2.HoughLines(edges,1,np.pi/180,110)

        #print (lines)
        line_count = lines.shape[0]

        for x in range(line_count):

            for rho,theta in lines[x]:
                a = np.cos(theta)
                b = np.sin(theta)
                #print(theta)
                x0 = a*rho
                y0 = b*rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*(a))
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*(a))

                crr_angle = np.degrees(b)
                if (crr_angle < 5):
                    #print(crr_angle)
                    angle = angle + crr_angle
                    count = count + 1
                    cv2.line(rgb,(x1,y1),(x2,y2),(0,0,255),2)

        angle = angle / count
        self.angle = angle
        return (angle)
Project: pybot    Author: spillai    | Project source code | File source code
def draw_hsv(flow, scale=2):
    h, w = flow.shape[:2]
    fx, fy = flow[:,:,0], flow[:,:,1]
    ang = np.arctan2(fy, fx) + np.pi
    v = np.sqrt(fx*fx+fy*fy)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[...,0] = ang*(180/np.pi/2)
    hsv[...,1] = 255
    hsv[...,2] = np.minimum(v*4*scale, 255)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr
Project: chainer-gan-experiments    Author: Aixile    | Project source code | File source code
def do_random_brightness(self, img):
        if np.random.rand() > 0.7:
            return img
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int)
        hsv[:,:,2] += np.random.randint(-40,70)
        hsv = np.clip(hsv, 0, 255).astype(np.uint8)
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return img
Project: demos    Author: jnez71    | Project source code | File source code
def cdisp(name, F, V, auto=True):
    mag = npl.norm(F, axis=0)
    ang = np.degrees(np.arctan2(F[1], F[0]) + np.pi)
    if auto: val = 255*mag/np.max(mag)
    else: val = 10000*mag
    img = cv2.cvtColor(np.uint8(np.dstack((ang/2, 255*np.ones_like(mag), val))), cv2.COLOR_HSV2BGR)
    img = img + cv2.cvtColor(np.uint8((255/np.max(V))*V), cv2.COLOR_GRAY2BGR)
    img = cv2.resize(np.clip(img, 0, 255), imshow_size)
    cv2.imshow(name, img)
    return img

# Recording tools
Project: ssd.pytorch    Author: amdegroot    | Project source code | File source code
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
Project: textobjdetection    Author: andfoy    | Project source code | File source code
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
Project: realtime-action-detection    Author: gurkirt    | Project source code | File source code
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
Project: conta-bolas    Author: ocarneiro    | Project source code | File source code
def single_hsv2bgr(self, hsv_color):
        # creates a single pixel image (1 x 1 x 3 colors)
        temp_image = np.zeros((1,1,3), np.uint8)
        # paints the pixel in hsv
        temp_image[0] = (hsv_color)
        temp_image = cv2.cvtColor(temp_image, cv2.COLOR_HSV2BGR)
        #print type(temp_image[0][0][0])
        pixel = temp_image[0][0]
        bgr_color = (pixel[0].item(),
                     pixel[1].item(),
                     pixel[2].item())
        return bgr_color
Project: carvana-challenge    Author: chplushsieh    | Project source code | File source code
def transform(image):
    '''
    input:
      image: numpy array of shape (height, width, channels), in BGR order
    output:
      transformed: numpy array of shape (height, width, channels), in BGR order
    '''
    transformed = image

    hue_shift_limit = (-50, 50)
    sat_shift_limit = (-5, 5)
    val_shift_limit = (-15, 15)

    if np.random.random() < 0.5:
        transformed = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(transformed)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        transformed = cv2.merge((h, s, v))
        transformed = cv2.cvtColor(transformed, cv2.COLOR_HSV2BGR)

    return transformed
Project: single_shot_multibox_detector    Author: oarriaga    | Project source code | File source code
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
Project: yolov2    Author: zhangkaij    | Project source code | File source code
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
Project: Artificial-Potential-Field    Author: vampcoder    | Project source code | File source code
def find_robot(im):
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 28, 0])
    upper = np.array([60, 168, 255])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(im, im, mask=mask)
    blur = cv2.blur(result, (5, 5))
    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)
    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]

    x = 0
    y = 0
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x / len(cnt)
    y = y / len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(im, (x, y), 5, (255, 0, 255), 2)
    #show_image(im)
    return (int(x), int(y))
Project: Artificial-Potential-Field    Author: vampcoder    | Project source code | File source code
def find_robot(frame):
    im = copy.copy(frame)
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 28, 0])
    upper = np.array([60, 168, 255])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(im, im, mask=mask)
    blur = cv2.blur(result, (5, 5))
    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)
    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]

    x = 0
    y = 0
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x / len(cnt)
    y = y / len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(im, (x, y), 5, (255, 0, 255), 2)
    cv2.imshow('img', im)
    k = cv2.waitKey(0)
    cv2.imwrite('robot.jpg', im)
    #show_image(im)
    return (int(x), int(y))
Project: colorcs    Author: ch3njust1n    | Project source code | File source code
def mergeHSV(H, S, V):
    try:
        mergedHSV = cv2.merge((H, S, V))
    except TypeError:
        H = load_image(H, mode=0)
        S = load_image(S, mode=0)
        V = load_image(V, mode=0)
        mergedHSV = cv2.merge((H, S, V))

    HSV2RGB = cv2.cvtColor(mergedHSV, cv2.COLOR_HSV2BGR)

    # cv2.imshow('hsv', HSV2RGB)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return HSV2RGB
Project: nexar-2    Author: lbin    | Project source code | File source code
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
Project: dlcv_for_beginners    Author: frombeijingwithlove    | Project source code | File source code
def hsv_transform(img, hue_delta, sat_mult, val_mult):
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float)
    img_hsv[:, :, 0] = (img_hsv[:, :, 0] + hue_delta) % 180
    img_hsv[:, :, 1] *= sat_mult
    img_hsv[:, :, 2] *= val_mult
    img_hsv[img_hsv > 255] = 255
    return cv2.cvtColor(np.round(img_hsv).astype(np.uint8), cv2.COLOR_HSV2BGR)
Project: Defect-Prediction    Author: Jorba123    | Project source code | File source code
def change_light(image, value, channel="v"):
    """ Change the light intensity of an image."""

    channelDic = {"h": 0, "s":1, "v":2}
    # "translate" image channel to channel index
    if not channel in channelDic:
        raise AttributeError("invalid channel value. Valid values are h, s, or v")

    # which format (ConvNet (3, w, h) vs. Normal (w, h, 3)
    reshape = False
    prevShape = image.shape

    if image.shape[0] == 3 or image.shape[0] == 1:
        reshape = True
        if image.shape[0] == image.shape[1] or (image.shape[0] == 1 and image.shape[1] == 3): # grayscale 1L, 1L, h, w OR color 1L, 3L, h, w
            reshapeVector = (image.shape[2], image.shape[3], image.shape[1])         
        else:                      
            reshapeVector = (image.shape[1], image.shape[2], image.shape[0])                    # single row color or grayscale 1L/3L, h, w
        image = image.reshape(reshapeVector)

    #print "Shape",image.shape
    #print "dtype",image.dtype
    # convert to hsv
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    # hsv[:,:,2] += value -  would be way faster but this does not prevent overflow (a high value gets even higher and becomes 0)
    channels = cv.split(hsv)
    for row in xrange(len(channels[channelDic[channel]])):
        for col in xrange(len(channels[channelDic[channel]][0])):
            channels[channelDic[channel]][row][col] = max(min(255, channels[channelDic[channel]][row][col]*value),0)

    image = cv.cvtColor(cv.merge(channels), cv.COLOR_HSV2BGR)

    # reshape back
    if reshape:        
        image = image.reshape(prevShape)
    return image
Project: nn_tools    Author: hahnyuan    | Project source code | File source code
def __call__(self, image, *args):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        if len(args):
            return (image, *args)
        else:
            return image
Project: Sign-Language-Recognition    Author: Anmol-Singh-Jaggi    | Project source code | File source code
def make_skin_white(frame):
    """
    Makes the skin color white.
    """
    print("Making skin white...")

    height, width = frame.shape[:2]

    # Convert image from HSV to BGR format
    frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
    # Convert image from BGR to gray format
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Highlight the main object
    frame = cv2.GaussianBlur(frame, (5, 5), 0)

    threshold = 1
    for i in xrange(height):
        for j in xrange(width):
            if frame[i][j] > threshold:
                # Setting the skin tone to be white.
                frame[i][j] = 255
            else:
                # Setting everything else to be black.
                frame[i][j] = 0

    print("Done!")
    return frame
Project: SSD-Keras_Tensorflow    Author: jedol    | Project source code | File source code
def random_saturation(image, prob, lower=0.5, upper=1.5):
    ## Input
    ##  image: 3d array = (h,w,c)
    ##  prob: The probability of adjusting saturation.
    ##  lower: Lower bound for the random saturation factor. Recommend 0.5.
    ##  upper: Upper bound for the random saturation factor. Recommend 1.5.

    if np.random.uniform() < prob:
        alpha = np.random.uniform(lower, upper)
        if abs(alpha-1) > 1e-3:
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            hsv[:,:,1] = np.uint8(np.clip(hsv[:,:,1].astype(np.float32)*alpha, 0, 255))
            image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    return image
Project: SSD-Keras_Tensorflow    Author: jedol    | Project source code | File source code
def random_hue(image, prob, delta=36):
    ## Input
    ##  image: 3d array = (h,w,c)
    ##  prob: The probability of adjusting hue.
    ##  delta: Amount to add to the hue channel within [-delta, delta].
    ##         The possible value is within [0, 180]. Recommend 36.

    if np.random.uniform() < prob:
        beta = np.random.uniform(-delta, delta)
        if abs(beta) >= 1:
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            hsv[:,:,0] = np.uint8(np.clip(hsv[:,:,0].astype(np.float32)+beta, 0, 255))
            image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    return image
Project: deep-learning-for-human-part-discovery-in-images    Author: shiba24    | Project source code | File source code
def change_hue(self, img, delta_hue):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)
        hsv[:, :, 0] += delta_hue
        hued_img = cv2.cvtColor(np.clip(hsv, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
        return hued_img
Project: DelaunayVisualization-FacialWarp    Author: sneha-belkhale    | Project source code | File source code
def drawColoredTriangles(img, triangleList, disp):
    #sort the triangle list by distance from the top left corner in order to get a gradient effect when drawing triangles
    triangleList=sorted(triangleList, cmp=triDistanceSort)
    h, w, c = img.shape
    #get bounding rectangle points of image
    r = (0, 0, w, h)
    #iterate through and draw all triangles in the list
    for idx, t in enumerate(triangleList):
        #grab individual vertex points
        pt1 = [t[0], t[1]]
        pt2 = [t[2], t[3]]
        pt3 = [t[4], t[5]]
        #select a position for displaying the enumerated triangle value
        pos = (t[2], t[3])
        #create the triangle
        triangle = np.array([pt1, pt2, pt3], np.int32)
        #select a color in HSV!! (manipulate idx for cool color gradients)
        color = np.uint8([[[idx, 100, 200]]])
        #color = np.uint8([[[0, 0, idx]]])
        #convert color to BGR
        bgr_color = cv2.cvtColor(color, cv2.COLOR_HSV2BGR)
        color = (int(bgr_color[(0, 0, 0)]), int(bgr_color[(0, 0, 1)]), int(bgr_color[(0, 0, 2)]))

        #draw the triangle if it is within the image bounds
        if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3):
            cv2.fillPoly(img, [triangle], color)
            # if display triangle number was selected, display the number.. this helps with triangle manipulation later
            if(disp==1):
                cv2.putText(img, str(idx), pos, fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale=0.3, color=(0, 0, 0))





######################################## example script ########################################
Project: OpenTracker    Author: patrickpoirier51    | Project source code | File source code
def __init__(self):

        # read fake balloon location from config file
        self.fake_balloon_location = LocationGlobal(balloon_config.config.get_float('fake-balloon', 'lat',-35.363274),
                                              balloon_config.config.get_float('fake-balloon', 'lon',149.164630),
                                              balloon_config.config.get_float('fake-balloon', 'alt',15))

        # fake balloon's colour is mid way between colour filter's low and high values
        h = (balloon_finder.filter_low[0] + balloon_finder.filter_high[0]) / 2
        s = (balloon_finder.filter_low[1] + balloon_finder.filter_high[1]) / 2
        v = (balloon_finder.filter_low[2] + balloon_finder.filter_high[2]) / 2

        # convert colour to BGR palette
        fake_balloon_colour_bgr = cv2.cvtColor(numpy.uint8([[[h,s,v]]]),cv2.COLOR_HSV2BGR)
        self.fake_balloon_colour_bgr_scalar = cv2.cv.Scalar(fake_balloon_colour_bgr.item(0), fake_balloon_colour_bgr.item(1), fake_balloon_colour_bgr.item(2))


        # fake balloon is same radius as actual balloon
        self.fake_balloon_radius = balloon_finder.balloon_radius_expected

        # background sky and ground colours
        self.background_sky_colour_bgr = (232, 228, 227)
        #self.background_ground_colour_bgr_scalar = cv2.cv.Scalar(87, 145, 158)
        self.background_ground_colour_bgr_scalar = cv2.cv.Scalar(87, 145, 158)

        # last iterations balloon radius
        self.last_balloon_radius = 0


    # get_background - returns a background image given a roll and pitch angle
    #     vehicle_roll and pitch are in radians
Project: videoseg    Author: pathak22    | Project source code | File source code
def compute_flow(impath1, impath2, outdir,
                    fbcodepath=os.getenv("HOME") + '/fbcode'):
    stem = os.path.splitext(os.path.basename(impath1))[0]
    deepmatch_cmd = os.path.join(fbcodepath,
                                    '_bin/experimental/deeplearning/dpathak' +
                                    '/video-processing/deepmatch/deepmatch')
    call([deepmatch_cmd, impath1, impath2, '-out',
                os.path.join(outdir, stem + '_sparse.txt'), '-downscale', '2'])
    img1 = cv2.imread(impath1).astype(float)
    M = np.zeros((img1.shape[0], img1.shape[1]), dtype=np.float32)
    filt = np.array([[1., -1.]]).reshape((1, -1))
    for c in range(3):
        gx = convolve2d(img1[:, :, c], filt, mode='same')
        gy = convolve2d(img1[:, :, c], filt.T, mode='same')
        M = M + gx**2 + gy**2

    M = M / np.max(M)
    with open(os.path.join(outdir, '_edges.bin'), 'w') as f:
        M.tofile(f)

    epicflow_command = os.path.join(fbcodepath,
                                    '_bin/experimental/deeplearning/dpathak' +
                                    '/video-processing/epicflow/epicflow')
    call([epicflow_command, impath1, impath2,
                os.path.join(outdir, '_edges.bin'),
                os.path.join(outdir, stem + '_sparse.txt'),
                os.path.join(outdir, 'flow.flo')])

    flow = read_flo(os.path.join(outdir, 'flow.flo'))
    hsv = np.zeros_like(img1).astype(np.uint8)
    hsv[..., 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0].astype(float),
                                flow[..., 1].astype(float))
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    hsv[..., 0] = ang * 180 / np.pi / 2
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imwrite(os.path.join(outdir, stem + '_flow.png'), bgr)
Project: videoseg    Author: pathak22    | Project source code | File source code
def run_deepmatch(imname1, imname2):
    command = os.getenv("HOME") + '/fbcode/_bin/experimental/' + \
        'deeplearning/dpathak/video-processing/deepmatch/deepmatch'
    call([command, imname1, imname2,
            '-out', os.getenv("HOME") + '/local/data/trash/tmp.txt',
            '-downscale', '2'])
    with open(os.getenv("HOME") + '/local/data/trash/tmp.txt', 'r') as f:
        lines = f.readlines()

    lines = [x.strip().split(' ') for x in lines]
    vals = np.array([[float(y) for y in x] for x in lines])
    x = ((vals[:, 0] - 8.) / 16.).astype(int)
    y = ((vals[:, 1] - 8.) / 16.).astype(int)
    U = np.zeros((int(np.max(y)) + 1, int(np.max(x)) + 1))
    U[(y, x)] = vals[:, 2] - vals[:, 0]
    V = np.zeros((int(np.max(y)) + 1, int(np.max(x)) + 1))
    V[(y, x)] = vals[:, 3] - vals[:, 1]

    img1 = cv2.imread(imname1)
    U1 = cv2.resize(U, (img1.shape[1], img1.shape[0]))
    V1 = cv2.resize(V, (img1.shape[1], img1.shape[0]))

    mag, ang = cv2.cartToPolar(U1, V1)
    print(np.max(mag))
    hsv = np.zeros_like(img1)
    hsv[..., 1] = 255
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr
Project: TableSoccerCV    Author: StudentCV    | Project source code | File source code
def get_center_scale(self, calibration_image):
        """

        :param calibration_image: The HSV-image to use for calculation
        :return: Position of center point in image (tuple), ratio px per cm (reproduction scale)
        """
        gray = cv2.cvtColor(calibration_image, cv2.COLOR_HSV2BGR)
        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 1)

        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 100, param1=50, param2=30, minRadius=50, maxRadius=300)

        center_circle = (0, 0, 0)
        min_dist = 0xFFFFFFFFFFF
        for circle in circles[0]:
            dist_x = abs(circle[0] - calibration_image.shape[1] / 2)
            dist_y = abs(circle[1] - calibration_image.shape[0] / 2)

            if(dist_x + dist_y) < min_dist:
                min_dist = dist_x + dist_y
                center_circle = circle

        rgb = cv2.cvtColor(calibration_image, cv2.COLOR_HSV2RGB)
        cv2.circle(rgb, (center_circle[0], center_circle[1]), center_circle[2], (0, 255, 0), 1)

        center = center_circle[0], center_circle[1]

        radius = center_circle[2]
        ratio_pxcm = radius / 10.25

        self.center = center
        self.ratio_pxcm = ratio_pxcm

        return [center, ratio_pxcm]
Project: TableSoccerCV    Author: StudentCV    | Project source code | File source code
def get_angle(self, calibration_image):
        """

        :param calibration_image: The HSV-image to use for calculation
        :return: Rotation angle of the field in image
        """
        # TODO: correct return value comment?
        rgb = cv2.cvtColor(calibration_image, cv2.COLOR_HSV2BGR)
        angle = 0
        count = 0

        gray = cv2.cvtColor(cv2.cvtColor(calibration_image, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)

        lines = cv2.HoughLines(edges, 1, np.pi/180, 110)

        if lines is None:
            raise Exception('field not detected')
        line_count = lines.shape[0]

        for x in range(line_count):

            for rho, theta in lines[x]:
                a = np.cos(theta)
                b = np.sin(theta)
                # print(theta)
                x0 = a * rho
                y0 = b * rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*a)
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*a)

                corr_angle = np.degrees(b)
                if corr_angle < 5:
                    # print(CorrAngle)
                    angle = angle + corr_angle
                    count = count + 1
                    cv2.line(rgb, (x1, y1), (x2, y2), (0, 0, 255), 2)
        print(angle)
        if count > 0:
            angle = angle / count
            self.angle = angle
            return angle
        else:
            self.angle = 0.1
            return False
Project: DSOD-Pytorch-Implementation    Author: Ellinier    | Project source code | File source code
def data_augmentation(self, img, boxes, labels):
        img, boxes = self.random_flip(img, boxes)
        img, boxes, labels = self.random_zoom(img, boxes, labels)
        img = self.pil_to_cv(img)
        img = self.random_contrast(img)
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # img = self.random_hue(img)
        # img = self.random_saturation(img)
        # img = self.random_brightness(img)
        # img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        img = self.random_color_channels(img)
        img = self.cv_to_pil(img)
        return img, boxes, labels
Project: Artificial-Potential-Field    Author: vampcoder    | Project source code | File source code
def find_goal(frame):
    # converting to HSV

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #show_image(hsv)

    lower_blue = np.array([113, 40, 29])
    upper_blue = np.array([123, 180, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #show_image(mask)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    #show_image(result)
    blur = cv2.blur(result, (5, 5))

    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)

    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    # th3 = cv2.adaptiveThreshold(bw2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)

    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # print len(contours)
    # if(len(contours) > 5):
    #    continue
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]
    if (len(cnt) == 0):
        return (-1, -1)
    cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
    x = 0
    y = 0
    #print 'find goal'
    #print len(cnt), j
    #print cnt
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x/len(cnt)
    y = y/len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(frame, (x, y), 5, (255, 0, 255), -1)

    #cv2.imshow('image', frame)
    #k = cv2.waitKey(0)

    return (int(x), int(y))
Project: Artificial-Potential-Field    Author: vampcoder    | Project source code | File source code
def find_goal(img):
    # converting to HSV
    frame = copy.copy(img)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #show_image(hsv)

    lower_blue = np.array([113, 40, 29])
    upper_blue = np.array([123, 180, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #show_image(mask)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    #show_image(result)
    blur = cv2.blur(result, (5, 5))

    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)

    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    # th3 = cv2.adaptiveThreshold(bw2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)

    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # print len(contours)
    # if(len(contours) > 5):
    #    continue
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]
    if (len(cnt) == 0):
        return (-1, -1)
    cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
    x = 0
    y = 0
    #print 'find goal'
    #print len(cnt), j
    #print cnt
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x/len(cnt)
    y = y/len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(frame, (x, y), 5, (255, 0, 255), -1)

    cv2.imshow('image', frame)
    cv2.imwrite('goal.jpg', frame)
    k = cv2.waitKey(0)

    return (int(x), int(y))
Project: Artificial-Potential-Field    Author: vampcoder    | Project source code | File source code
def find_goal(frame):
    # converting to HSV

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #show_image(hsv)

    lower_blue = np.array([113, 40, 29])
    upper_blue = np.array([123, 180, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #show_image(mask)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    #show_image(result)
    blur = cv2.blur(result, (5, 5))

    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)

    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    # th3 = cv2.adaptiveThreshold(bw2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)

    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # print len(contours)
    # if(len(contours) > 5):
    #    continue
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]
    if (len(cnt) == 0):
        return (-1, -1)
    cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
    x = 0
    y = 0
    #print 'find goal'
    #print len(cnt), j
    #print cnt
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x/len(cnt)
    y = y/len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(frame, (x, y), 5, (255, 0, 255), -1)

    #cv2.imshow('image', frame)
    #k = cv2.waitKey(0)

    return (int(x), int(y))