Python cv2 module, merge() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.merge().
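
Before diving into the project examples, here is a minimal sketch of the split/merge round trip (it uses a placeholder zero array rather than a real image):

import cv2
import numpy as np

# Split a BGR image into three single-channel planes, then merge them back.
img = np.zeros((4, 4, 3), dtype=np.uint8)  # placeholder image
b, g, r = cv2.split(img)
restored = cv2.merge((b, g, r))
assert np.array_equal(img, restored)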

Project: fully-convolutional-network-semantic-segmentation    Author: alecng94    | Project source | File source
def enhance(image_path, clip_limit=3):
    image = cv2.imread(image_path)
    # convert image to LAB color model
    image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

    # split the image into L, A, and B channels
    l_channel, a_channel, b_channel = cv2.split(image_lab)

    # apply CLAHE to lightness channel
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
    cl = clahe.apply(l_channel)

    # merge the CLAHE enhanced L channel with the original A and B channel
    merged_channels = cv2.merge((cl, a_channel, b_channel))

    # convert image from LAB color model back to BGR color model
    final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
    return cv2_to_pil(final_image)
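
A hypothetical call to the function above; 'photo.jpg' is a placeholder path, and cv2_to_pil is the project's own helper (not shown here), assumed to return a PIL image:

# Hypothetical usage: CLAHE-enhance a photo and save the returned PIL image.
pil_img = enhance('photo.jpg', clip_limit=2)
pil_img.save('photo_clahe.jpg')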
Project: MusicGenerator    Author: Conchylicultor    | Project source | File source
def write_song(piano_roll, filename):
        """ Save the song on disk
        Args:
            piano_roll (np.array): a song object containing the tracks and melody
            filename (str): the path where to save the song (don't add the file extension)
        """
        note_played = piano_roll > 0.5
        piano_roll_int = np.uint8(piano_roll*255)

        b = piano_roll_int * (~note_played).astype(np.uint8)  # Note silenced
        g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
        r = piano_roll_int * note_played.astype(np.uint8)     # Notes played

        img = cv.merge((b, g, r))

        # TODO: We could insert a first column indicating the piano keys (black/white key)

        cv.imwrite(filename + '.png', img)
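
A hedged usage sketch, assuming cv is cv2 imported under that alias, that write_song is exposed as a static method, and that the piano roll is an array of note activations in [0, 1]:

import numpy as np

# Hypothetical piano roll: 88 keys x 64 time steps of activations in [0, 1].
piano_roll = np.random.rand(88, 64)
write_song(piano_roll, 'song_preview')  # writes song_preview.png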
Project: robik    Author: RecunchoMaker    | Project source | File source
def get_color_medio(self, roi, a,b,imprimir = False):
        xl,yl,ch = roi.shape
        roiyuv = cv2.cvtColor(roi,cv2.COLOR_RGB2YUV)
        roihsv = cv2.cvtColor(roi,cv2.COLOR_RGB2HSV)
        h,s,v=cv2.split(roihsv)
        mask=(h<5)
        h[mask]=200

        roihsv = cv2.merge((h,s,v))
        std = np.std(roiyuv.reshape(xl*yl,3),axis=0)
        media = np.mean(roihsv.reshape(xl*yl,3), axis=0)-60
        mediayuv = np.mean(roiyuv.reshape(xl*yl,3), axis=0)

        if std[0]<12 and std[1]<12 and std[2]<12:
        #if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
            media = np.mean(roihsv.reshape(xl*yl,3), axis=0)
            # yellow has a saturation of about 65 and a value around 200
            if media[1]<60: #and (abs(media[0]-30)>10):
                # white
                return [-10,0,0]
            else:
                return media
        else:
            return None
Project: rpg_davis_simulator    Author: uzh-rpg    | Project source | File source
def extract_grayscale(img, srgb=False):
  dw = img.header()['dataWindow']

  size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
  precision = Imath.PixelType(Imath.PixelType.FLOAT)
  R = img.channel('R', precision)
  G = img.channel('G', precision)
  B = img.channel('B', precision)

  r = np.frombuffer(R, dtype=np.float32)  # np.fromstring is deprecated for binary data
  g = np.frombuffer(G, dtype=np.float32)
  b = np.frombuffer(B, dtype=np.float32)

  r.shape = (size[1], size[0])
  g.shape = (size[1], size[0])
  b.shape = (size[1], size[0])

  rgb = cv2.merge([b, g, r])
  grayscale = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)

  if srgb:
      grayscale = lin2srgb(grayscale)

  return grayscale
Project: unet-color    Author: 4g    | Project source | File source
def arrange_images(Y):
    concat_image = None
    Y = (Y + 1)/2
    for yi in np.split(Y, 10):
        image = None
        for y in yi:
            img = cv2.merge((y[0, :, :], y[1, :, :], y[2, :, :]))
            if image is None:
                image = img
            else:
                image = np.concatenate((image, img))
        if concat_image is None:
            concat_image = image
        else:
            concat_image = np.concatenate((concat_image, image), axis=1)
    return concat_image
Project: unet-tensorflow    Author: timctho    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
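
An aside on why these augmentation snippets shift channels with cv2.add instead of the + operator: cv2.add saturates at the dtype limits, while NumPy addition wraps around on overflow. A minimal demonstration:

import cv2
import numpy as np

x = np.uint8([250])
y = np.uint8([10])
print(cv2.add(x, y))  # [[255]] -- OpenCV saturates at 255
print(x + y)          # [4]     -- NumPy wraps around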
Project: unet-tensorflow    Author: timctho    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: eclipse2017    Author: google    | Project source | File source
def get_rescaled(fname, metadata, directory, rescaled_directory):
    # TODO(dek): move rescaling to its own function
    rescaled_fname = fname + ".rescaled.png"
    rescaled = os.path.join(rescaled_directory, rescaled_fname)
    if not os.path.exists(rescaled):
        print "Unable to find cached rescaled image for", fname
        return None
    image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
    if image is None:
        print "Failed to read image from", rescaled
        return None
    b_channel, g_channel, r_channel = cv2.split(image)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

    return image
Project: colorcs    Author: ch3njust1n    | Project source | File source
def check_split(self):
        # from copy import deepcopy
        # h = deepcopy(self.h)
        # s = deepcopy(self.s)
        # v = deepcopy(self.v)

        if not os.path.exists(self.output_path + 'check_merge/'):
            os.makedirs(self.output_path + 'check_merge/')

        merged = cv2.merge((self.h, self.s, self.v))
        cv2.imshow('hsv-remerged', merged)
        cv2.imwrite(self.output_path + 'check_merge/hsv-merged.jpg', merged)

        # Try to merge 3 noisy hsv channels into 1 noisy image
        merged2 = cv2.merge((self.n_h, self.n_s, self.n_v))
        cv2.imshow('hsv-noisy-remerged', merged2)

        rgb = cv2.cvtColor(merged, cv2.COLOR_HSV2BGR)
        cv2.imshow('rgb-remerged', rgb)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Project: Farmbot_GeneralAP    Author: SpongeYao    | Project source | File source
def extractPlantsArea(self, arg_mode=0,arg_INV= False, b_threshold=80, a_threshold=80):
        zeros = np.zeros(self.image.shape[:2], dtype = "uint8")

        imgLAB = cv2.cvtColor(self.image, self.colorSpace)
        (L, A, B) = cv2.split(imgLAB)
        cv2.imwrite('Debug/imgB.jpg',B)
        cv2.imwrite('Debug/imgA.jpg',A)
        #(T_weeds_b, thresh_weeds_b) = cv2.threshold(B, b_threshold, 255, cv2.THRESH_BINARY)
        #(T_weeds_a, thresh_weeds_a) = cv2.threshold(A, a_threshold, 255, cv2.THRESH_BINARY)
        if arg_mode==0:
            thresh_weeds_a= imgProcess_tool.binarialization(A,0,arg_INV, a_threshold)
            thresh_weeds_b= imgProcess_tool.binarialization(B,0,arg_INV, b_threshold)
        elif arg_mode==1:
            thresh_weeds_b= imgProcess_tool.binarialization(B, 1, arg_INV)
            thresh_weeds_a= imgProcess_tool.binarialization(A, 1, arg_INV)
        elif arg_mode==2:
            thresh_weeds_b= imgProcess_tool.binarialization(B, 2, arg_INV)
            thresh_weeds_a= imgProcess_tool.binarialization(A, 2, arg_INV)
        cv2.imwrite('Debug/imgB_thr.jpg',thresh_weeds_b)
        cv2.imwrite('Debug/imgA_thr.jpg',thresh_weeds_a)
        imgRGB = cv2.merge([zeros, thresh_weeds_b, thresh_weeds_a])
        return thresh_weeds_a, thresh_weeds_b
Project: head-segmentation    Author: szywind    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: head-segmentation    Author: szywind    | Project source | File source
def main():
    imgList = getImageList(input_folder='/home/jin/shenzhenyuan/head-segmentation/input/test',
                           output_file='/home/jin/shenzhenyuan/head-segmentation/input/testSet.txt')
    for img_path in imgList:
        img = cv2.imread('{}'.format(img_path))
        if img_path[:img_path.rfind('.')].endswith('png'):
            str = img_path[:img_path.rfind('.')] + '-seg.png'
        else:
            str = img_path[:img_path.rfind('.')] + '.png-seg.png'
        mask = cv2.imread('{}'.format(str))
        prob = mask[:,:,0:2] / 255.0
        prob[:, :, 1] = 1 - prob[:, :, 0]
        res, Q = denseCRF(img, prob)
        a = 1-res
        a = a.astype('uint8')

        r_channel, g_channel, b_channel = cv2.split(img)
        img_rgba = cv2.merge((r_channel, g_channel, b_channel, a*255))
        cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), img_rgba)

        # a = np.dstack((a,)*3)
        # plt.imshow(a*img)
        # cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), (a>0.1)*img)

        cv2.imwrite('{}_crf_qtsu.png'.format(img_path[:img_path.find('.')]), cropHead(Q, img))
Project: Bayesian-FlowNet    Author: Johswald    | Project source | File source
def flows_to_img(flows):
    """Pyfunc wrapper to transorm flow vectors in color coding"""

    def _flow_transform(flows):
        """ Tensorflow Pyfunc to transorm flow to color coding"""

        flow_imgs = []
        for flow in flows:
            img = computeColor.computeImg(flow)
            # cv2 returns bgr images
            b, g, r = cv2.split(img)
            img = cv2.merge((r, g, b))
            flow_imgs.append(img)
        return [flow_imgs]

    flow_imgs = tf.py_func(_flow_transform, [flows],
                           [tf.uint8], stateful=False, name='flow_transform')

    flow_imgs = tf.squeeze(tf.stack(flow_imgs))
    flow_imgs.set_shape([FLAGS.batchsize] + FLAGS.d_shape_img)
    return flow_imgs
Project: How_to_generate_music_in_tensorflow_LIVE    Author: llSourcell    | Project source | File source
def write_song(piano_roll, filename):
        """ Save the song on disk
        Args:
            piano_roll (np.array): a song object containing the tracks and melody
            filename (str): the path where to save the song (don't add the file extension)
        """
        note_played = piano_roll > 0.5
        piano_roll_int = np.uint8(piano_roll*255)

        b = piano_roll_int * (~note_played).astype(np.uint8)  # Note silenced
        g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
        r = piano_roll_int * note_played.astype(np.uint8)     # Notes played

        img = cv.merge((b, g, r))

        # TODO: We could insert a first column indicating the piano keys (black/white key)

        cv.imwrite(filename + '.png', img)
Project: Kaggle-Carvana-Image-Masking-Challenge    Author: petrosgk    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: virtual-dressing-room    Author: akash0x53    | Project source | File source
def normalized(self):

#        t1=time.time()
        b=self.down[:,:,0]
        g=self.down[:,:,1]
        r=self.down[:,:,2]

        sum=b+g+r


        self.norm[:,:,0]=b/sum*255.0
        self.norm[:,:,1]=g/sum*255.0
        self.norm[:,:,2]=r/sum*255.0

 #       print "conversion time",time.time()-t1

        #self.norm=cv2.merge([self.norm1,self.norm2,self.norm3])
        self.norm_rgb=cv2.convertScaleAbs(self.norm)
        #self.norm.dtype=np.uint8
        return self.norm_rgb
Project: BlindWaterMark    Author: chishaxie    | Project source | File source
def bgr_to_rgb(img):
    b, g, r = cv2.split(img)
    return cv2.merge([r, g, b])
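
The same conversion can be done in one call with cv2.cvtColor; a quick check that the two forms agree (placeholder array, not a real image):

import cv2
import numpy as np

img = np.zeros((2, 2, 3), dtype=np.uint8)  # placeholder BGR image
assert np.array_equal(bgr_to_rgb(img), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))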
Project: masks-and-hats    Author: leoneckert    | Project source | File source
def add_alpha_channel(img):
    # img = cv2.imread(path)
    b_channel, g_channel, r_channel = cv2.split(img)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255 #creating a dummy alpha channel image.
    return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
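
A hypothetical usage of the helper above: give a plain BGR image a fully opaque alpha channel and write it out as PNG (PNG keeps the fourth channel; JPEG would drop it):

import cv2
import numpy as np

bgr = np.zeros((10, 10, 3), dtype=np.uint8)  # placeholder image
bgra = add_alpha_channel(bgr)
cv2.imwrite('with_alpha.png', bgra)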
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def addFrame(self, frame, width=300):
        frame = imutils.resize(frame, width)

        # check if the writer is None
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w * 2, self.h * 2), True)
            self.zeros = np.zeros((self.h, self.w), dtype="uint8")

        # break the image into its RGB components, then construct the
        # RGB representation of each frame individually
        (B, G, R) = cv2.split(frame)
        R = cv2.merge([self.zeros, self.zeros, R])
        G = cv2.merge([self.zeros, G, self.zeros])
        B = cv2.merge([B, self.zeros, self.zeros])

        # construct the final output frame, storing the original frame
        # at the top-left, the red channel in the top-right, the green
        # channel in the bottom-right, and the blue channel in the
        # bottom-left
        output = np.zeros((self.h * 2, self.w * 2, 3), dtype="uint8")
        output[0:self.h, 0:self.w] = frame
        output[0:self.h, self.w:self.w * 2] = R
        output[self.h:self.h * 2, self.w:self.w * 2] = G
        output[self.h:self.h * 2, 0:self.w] = B

        # write the output frame to file
        self.writer.write(output)
Project: tensorflow-pi    Author: karaage0703    | Project source | File source
def equalizeHistRGB(src):

    # channel order after split is B, G, R
    RGB = list(cv2.split(src))
    for i in range(3):
        # equalizeHist returns a new array, so assign the result back
        RGB[i] = cv2.equalizeHist(RGB[i])

    img_hist = cv2.merge([RGB[0], RGB[1], RGB[2]])
    return img_hist

Project: RealtimeFacialEmotionRecognition    Author: sushant3095    | Project source | File source
def rgb(bgr_img):
    b,g,r = cv.split(bgr_img)       # get b,g,r
    rgb_img = cv.merge([r,g,b])     # switch it to rgb
    return rgb_img

# Given directory loc, get all images in directory and crop to just faces
# Returns face_list, an array of cropped image file names
Project: RealtimeFacialEmotionRecognition    Author: sushant3095    | Project source | File source
def toggleRGB(img):
  r,g,b = cv.split(img)
  img = cv.merge([b,g,r])
  return img

# Combine two images for displaying side-by-side
# If maxSize is true, crops sides of image to keep under 2880 pixels in width
Project: RealtimeFacialEmotionRecognition    Author: sushant3095    | Project source | File source
def load_minibatch(input_list, color, labels, start,num):
    # Enforce minimum on start
    start = max(0,start)

    # Enforce maximum on end
    end = start + num
    end = min(len(input_list), end)

    # Isolate files
    files = input_list[start:end]

    images = []
    for file in files:
        img = caffe.io.load_image(file, color)

        # Handle incorrect image dims for uncropped images
        # TODO: Get uncropped images to import correctly
        if img.shape[0] == 3 or img.shape[0] == 1:
            img = np.swapaxes(np.swapaxes(img, 0, 1), 1, 2)

        # BUG FIX: Is this ok?
        # color=True gets the correct desired dimension of WxHx3
        # But color=False gets images of WxHx1. Need WxHx3 or will get "Index out of bounds" exception
        # Fix by concatenating three copies of the image
        if img.shape[2] == 1:
            img = cv.merge([img,img,img])

        # Add image array to batch
        images.append(img)

    labelsReduced = labels[start:end]
    return images, labelsReduced

# Classify all images in a list of image file names
# No return value, but can display outputs if desired
Project: cvloop    Author: shoeffner    | Project source | File source
def load_hat(self, path):  # pylint: disable=no-self-use
        """Loads the hat from a picture at path.

        Args:
            path: The path to load from

        Returns:
            The hat data.
        """
        hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if hat is None:
            raise ValueError('No hat image found at `{}`'.format(path))
        b, g, r, a = cv2.split(hat)
        return cv2.merge((r, g, b, a))
Project: pycolor_detection    Author: parth1993    | Project source | File source
def removebg(segmented_img):
    src = cv2.imdecode(np.squeeze(np.asarray(segmented_img[1])), 1)
    tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(src)
    rgba = [b, g, r, alpha]
    dst = cv2.merge(rgba)  # cv2.merge takes just the channel list
    processed_img = cv2.imencode('.png', dst)

    return processed_img
Project: segmentation-visualization-training    Author: tkwoo    | Project source | File source
def predict_image(flag):
    t_start = cv2.getTickCount()
    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    with open(os.path.join(flag.ckpt_dir, flag.ckpt_name, 'model.json'), 'r') as json_file:
            loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    weight_list = sorted(glob(os.path.join(flag.ckpt_dir, flag.ckpt_name, "weight*")))
    model.load_weights(weight_list[-1])
    print "[*] model load : %s"%weight_list[-1]
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000 
    print "[*] model loading Time: %.3f ms"%t_total

    imgInput = cv2.imread(flag.test_image_path, 0)
    input_data = imgInput.reshape((1,256,256,1))

    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print "Predict Time: %.3f ms"%t_total

    imgMask = (result[0]*255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    _, imgMask = cv2.threshold(imgMask, int(255*flag.confidence_value), 255, cv2.THRESH_BINARY)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    # imgZero = np.zeros((256,256), np.uint8)
    # imgMaskColor = cv2.merge((imgZero, imgMask, imgMask))
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.3, 0.0)
    output_path = os.path.join(flag.output_dir, os.path.basename(flag.test_image_path))
    cv2.imwrite(output_path, imgShow)
    print "SAVE:[%s]"%output_path
Project: saliency    Author: shuuchen    | Project source | File source
def makeNormalizedColorChannels(image, thresholdRatio=10.):
    """
        Creates a version of the (3-channel color) input image in which each of
        the (4) channels is normalized.  Implements color opponencies as per 
        Itti et al. (1998).
        Arguments:
            image           : input image (3 color channels)
            thresholdRatio  : the threshold below which to set all color values
                                to zero.
        Returns:
            an output image with four normalized color channels for red, green,
            blue and yellow.
    """
    intens = intensity(image)
    threshold = intens.max() / thresholdRatio
    logger.debug("Threshold: %d", threshold)
    r,g,b = cv2.split(image)
    cv2.threshold(src=r, dst=r, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=g, dst=g, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=b, dst=b, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    R = r - (g + b) / 2
    G = g - (r + b) / 2
    B = b - (g + r) / 2
    Y = (r + g) / 2 - cv2.absdiff(r,g) / 2 - b

    # Negative values are set to zero.
    cv2.threshold(src=R, dst=R, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=G, dst=G, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=B, dst=B, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=Y, dst=Y, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)

    image = cv2.merge((R,G,B,Y))
    return image
Project: saliency    Author: shuuchen    | Project source | File source
def markMaxima(saliency):
    """
        Mark the maxima in a saliency map (a gray-scale image).
    """
    maxima = maximum_filter(saliency, size=(5, 5))
    maxima = numpy.array(saliency == maxima, dtype=numpy.float64) * 255
    g = cv2.max(saliency, maxima)
    r = saliency
    b = saliency
    marked = cv2.merge((b,g,r))
    return marked
Project: carvana-challenge    Author: chplushsieh    | Project source | File source
def transform(image):
    '''
    input:
      image: numpy array of shape (channels, height, width), in RGB code
    output:
      transformed: numpy array of shape (channels, height, width), in RGB code
    '''
    transformed = image

    hue_shift_limit = (-50, 50)
    sat_shift_limit = (-5, 5)
    val_shift_limit = (-15, 15)

    if np.random.random() < 0.5:
        transformed = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(transformed)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        transformed = cv2.merge((h, s, v))
        transformed = cv2.cvtColor(transformed, cv2.COLOR_HSV2BGR)

    return transformed
Project: python-image-processing    Author: karaage0703    | Project source | File source
def color_gray(src):
    b, g, r = cv2.split(src)
    # average in a wider dtype to avoid uint8 overflow, then convert back
    gray = ((b.astype(np.uint16) + g + r) / 3).astype(np.uint8)
    dst = cv2.merge((gray, gray, gray))

    return dst
Project: python-image-processing    Author: karaage0703    | Project source | File source
def color_swap(src):
    img_bgr = cv2.split(src)
    dst = cv2.merge((img_bgr[2], img_bgr[1], img_bgr[0])) # from BGR to RGB

    return dst
Project: python-image-processing    Author: karaage0703    | Project source | File source
def color_sepia(src):
    img_bgr = cv2.split(src)
    # R=A, G=0.8xA, B=0.55xA
    dst = cv2.merge((img_bgr[0] * 0.55 , img_bgr[1] * 0.8, img_bgr[2] * 1.0))

    return dst
Project: caffe-model    Author: GeekLiB    | Project source | File source
def image_preprocess(img):
    b, g, r = cv2.split(img)
    return cv2.merge([(b-mean_value[0])/std[0], (g-mean_value[1])/std[1], (r-mean_value[2])/std[2]])
Project: vehicle_detection    Author: AuzanMuh    | Project source | File source
def yuvPassShadowRemoval(src, shadowThreshold):
    height, width = src.shape[:2]
    imgYUV = cv2.cvtColor(src, cv2.COLOR_RGB2YUV)
    yImg, uImg, vImg = cv2.split(imgYUV)

    # for i in range(0, height):
    #   for j in range(0, width):
    #       yImg[i, j] = 0
    yImg = np.zeros((height, width, 1), np.uint8)
    imgYUV = cv2.merge([yImg, uImg, vImg])

    rgbImg = cv2.cvtColor(imgYUV, cv2.COLOR_YUV2RGB)
    rImg, gImg, bImg = cv2.split(rgbImg)

    count = width * height
    avg = np.sum(bImg)
    avg /= count * 1.0
    # for i in range(0, height):
    #    for j in range(0, width):
    #        if bImg[i, j] > ave:
    #           rImg[i, j] = 255
    #           gImg[i, j] = 255
    #           bImg[i, j] = 255
    #        else:
    #           rImg[i, j] = 0
    #           gImg[i, j] = 0
    #           bImg[i, j] = 0

    if shadowThreshold is not None:
        avg = shadowThreshold

    _, threshold = cv2.threshold(bImg, avg, 255, cv2.THRESH_BINARY)

    output = threshold
    return output
Project: HappyNet    Author: danduncan    | Project source | File source
def rgb(bgr_img):
    b,g,r = cv.split(bgr_img)       # get b,g,r
    rgb_img = cv.merge([r,g,b])     # switch it to rgb
    return rgb_img

# Given directory loc, get all images in directory and crop to just faces
# Returns face_list, an array of cropped image file names
Project: HappyNet    Author: danduncan    | Project source | File source
def toggleRGB(img):
  r,g,b = cv.split(img)
  img = cv.merge([b,g,r])
  return img


# Combine two images for displaying side-by-side
# If maxSize is true, crops sides of image to keep under 2880 pixel width of screen
Project: HappyNet    Author: danduncan    | Project source | File source
def load_minibatch(input_list, color, labels, start,num):
    # Enforce minimum on start
    start = max(0,start)

    # Enforce maximum on end
    end = start + num
    end = min(len(input_list), end)

    # Isolate files
    files = input_list[start:end]

    images = []
    for file in files:
        img = caffe.io.load_image(file, color)

        # Handle incorrect image dims for uncropped images
        # TODO: Get uncropped images to import correctly
        if img.shape[0] == 3 or img.shape[0] == 1:
            img = np.swapaxes(np.swapaxes(img, 0, 1), 1, 2)

        # BUG FIX: Is this ok?
        # color=True gets the correct desired dimension of WxHx3
        # But color=False gets images of WxHx1. Need WxHx3 or will get "Index out of bounds" exception
        # Fix by concatenating three copies of the image
        if img.shape[2] == 1:
            img = cv.merge([img,img,img])

        # Add image array to batch
        images.append(img)

    labelsReduced = labels[start:end]
    return images, labelsReduced

# Big function:
# Classify all images in a list of image file names
# Using the inputs, constructs a network, imports images either individually or in minibatches,
# gets the network classification, and builds up the confusion matrix.
# No return value, but it can plot the confusion matrix at the end
Project: discoGAN.tensorflow.slim    Author: ilguyi    | Project source | File source
def ImageWrite(image, name, step):
  r,g,b = cv2.split(image)
  image = cv2.merge([b,g,r])

  filename = 'styleA_%s_styleB_%s_' % (FLAGS.style_A, FLAGS.style_B)
  filename += name
  filename += '_%06.d.jpg' % step
  cv2.imwrite(filename, image)
Project: discoGAN.tensorflow.slim    Author: ilguyi    | Project source | File source
def read_images(filenames, domain=None, image_size=64):

    images = []
    for fn in filenames:
        image = cv2.imread(fn)
        if image is None:
            continue

        if domain == 'A':
            kernel = np.ones((3,3), np.uint8)
            image = image[:, :256, :]
            image = 255. - image
            image = cv2.dilate( image, kernel, iterations=1 )
            image = 255. - image
        elif domain == 'B':
            image = image[:, 256:, :]

        image = cv2.resize(image, (image_size,image_size))
        # Change the order of channels
        r,g,b = cv2.split(image)
        image = cv2.merge([b,g,r])
        # Scale from [0, 255] to [-1, 1]
        image = image.astype(np.float32) / 255.
        image -= 0.5
        image *= 2.0
        # TensorFlow shape (height, width, channels)
        #image = image.transpose(2,0,1)
        images.append( image )

    images = np.stack( images )
    return images
Project: MOOCs    Author: ankitaggarwal011    | Project source | File source
def apply_median(k):
  ''' Apply a median filter of size k to images

  This function searches through the images/source subfolder and uses your
  median filter function implemented in part3 to apply a median filter of
  size k to each image found inside. It then saves the resulting images to
  the images/filtered subfolder, appending 'median' and the kernel size to
  their names.
  '''
  print 'applying median filter to images'

  sourcefolder = os.path.abspath(os.path.join(os.curdir, 'images', 'source'))
  outfolder = os.path.abspath(os.path.join(os.curdir, 'images', 'filtered'))

  print 'Searching for images in {} folder'.format(sourcefolder)

  exts = ['.bmp', '.pbm', '.pgm', '.ppm', '.sr', '.ras', '.jpeg', '.jpg', 
    '.jpe', '.jp2', '.tiff', '.tif', '.png']

  for dirname, dirnames, filenames in os.walk(sourcefolder):
    for filename in filenames:
      name, ext = os.path.splitext(filename)
      if ext in exts:
        print "Reading image {}.".format(filename)
        img = cv2.imread(os.path.join(dirname, filename))

        print "Applying filter."
        if len(img.shape) == 2:
          outimg = part3.filter_median(img, k)
        else:
          outimg = [] 
          for channel in range(img.shape[2]):
            outimg.append(part3.filter_median(img[:,:,channel], k))
          outimg = cv2.merge(outimg)
        outpath = os.path.join(outfolder, name + 'median' + str(k) + ext)

        print "Writing image {}.\n\n".format(outpath)
        cv2.imwrite(outpath, outimg)
Project: MOOCs    Author: ankitaggarwal011    | Project source | File source
def apply_filter(conv_func, kernel, kernel_name):
  ''' Apply the given kernel to images

  This function searches through the images/source subfolder, and
  uses your convolution function implemented in part0 to apply the given kernel
  to each image found inside. It will then save the resulting images to the 
  images/filtered subfolder, appending their names with kernel_name.
  '''
  print 'applying {} kernel to images'.format(kernel_name)

  sourcefolder = os.path.abspath(os.path.join(os.curdir, 'images', 'source'))
  outfolder = os.path.abspath(os.path.join(os.curdir, 'images', 'filtered'))

  print 'Searching for images in {} folder'.format(sourcefolder)

  exts = ['.bmp', '.pbm', '.pgm', '.ppm', '.sr', '.ras', '.jpeg', '.jpg', 
    '.jpe', '.jp2', '.tiff', '.tif', '.png']

  for dirname, dirnames, filenames in os.walk(sourcefolder):
    for filename in filenames:
      name, ext = os.path.splitext(filename)
      if ext in exts:
        print "Reading image {}.".format(filename)
        img = cv2.imread(os.path.join(dirname, filename))

        print "Applying filter."
        if len(img.shape) == 2:
          outimg = conv_func(img, kernel)
        else:
          outimg = []
          for channel in range(img.shape[2]):
            outimg.append(conv_func(img[:,:,channel], kernel))
          outimg = cv2.merge(outimg)
        outpath = os.path.join(outfolder, name + kernel_name + ext)

        print "Writing image {}.\n\n".format(outpath)
        cv2.imwrite(outpath, outimg)
Project: Vehicle-identification    Author: soloice    | Project source | File source
def equalize_hist_all(self, root='../data/val/'):
        raw_root, out_root = root + 'images/', root + 'normalized/'
        if not os.path.exists(out_root):
            os.mkdir(out_root)
        cnt = 0
        for parent, _, files in os.walk(raw_root):
            for name in files:
                img = cv2.imread(parent + name)
                b, g, r = cv2.split(img)
                bb, gg, rr = cv2.equalizeHist(b), cv2.equalizeHist(g), cv2.equalizeHist(r)
                [row, col] = b.shape

                if row > col:
                    d = row - col
                    add_block = np.zeros((d, row))
                    new_bb = np.vstack((bb.T, add_block))
                    new_gg = np.vstack((gg.T, add_block))
                    new_rr = np.vstack((rr.T, add_block))
                    new_bb = new_bb.T
                    new_gg = new_gg.T
                    new_rr = new_rr.T
                else:
                    d = col - row
                    add_block = np.zeros((d, col))
                    new_bb = np.vstack((add_block, bb))
                    new_gg = np.vstack((add_block, gg))
                    new_rr = np.vstack((add_block, rr))

                new_bb, new_gg, new_rr = np.uint8(new_bb), np.uint8(new_gg), np.uint8(new_rr)
                new_image = cv2.merge([new_bb, new_gg, new_rr])

                res = cv2.resize(new_image, (100, 100), interpolation=cv2.INTER_CUBIC)
                new_name = out_root + name
                cv2.imwrite(new_name, res)
                cnt += 1
                if cnt % 500 == 0:
                    print 'Processed', cnt, 'images!'
Project: eclipse2017    Author: google    | Project source | File source
def hisEqulColor(img):
    ycrcb=cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
    channels=cv2.split(ycrcb)
    # create a CLAHE object
    clahe = cv2.createCLAHE()
    channels[0] = clahe.apply(channels[0])
    cv2.merge(channels,ycrcb)
    cv2.cvtColor(ycrcb,cv2.COLOR_YCR_CB2BGR,img)
Project: eclipse2017    Author: google    | Project source | File source
def get_rescaled(fname, rescaled_directory):
    rescaled_fname = fname + ".rescaled.png"
    rescaled = os.path.join(rescaled_directory, rescaled_fname)
    image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
    if image is None:
        print "Failed to read image from", rescaled
        return None
    # hisEqulColor(image)


    b_channel, g_channel, r_channel = cv2.split(image)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
    return image
Project: colorcs    Author: ch3njust1n    | Project source | File source
def mergeBGR(B, G, R):
    try:
        mergedBGR = cv2.merge((B, G, R))
    except TypeError:
        B = load_image(B, mode=0)
        G = load_image(G, mode=0)
        R = load_image(R, mode=0)
        mergedBGR = cv2.merge((B, G, R))

    # cv2.imshow('bgr', mergedBGR)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return mergedBGR
Project: colorcs    Author: ch3njust1n    | Project source | File source
def mergeHSV(H, S, V):
    try:
        mergedHSV = cv2.merge((H, S, V))
    except TypeError:
        H = load_image(H, mode=0)
        S = load_image(S, mode=0)
        V = load_image(V, mode=0)
        mergedHSV = cv2.merge((H, S, V))

    HSV2RGB = cv2.cvtColor(mergedHSV, cv2.COLOR_HSV2BGR)

    # cv2.imshow('hsv', HSV2RGB)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return HSV2RGB
Project: UAV-and-TrueOrtho    Author: LeonChen66    | Project source | File source
def simplest_cb(img, percent):
    assert img.shape[2] == 3
    assert percent > 0 and percent < 100

    half_percent = percent / 200.0

    channels = cv2.split(img)

    out_channels = []
    for channel in channels:
        assert len(channel.shape) == 2
        # find the low and high percentile values (based on the input percentile)
        height, width = channel.shape
        vec_size = width * height
        flat = channel.reshape(vec_size)

        assert len(flat.shape) == 1

        flat = np.sort(flat)

        n_cols = flat.shape[0]

        low_val  = flat[math.floor(n_cols * half_percent)]
        high_val = flat[math.ceil( n_cols * (1.0 - half_percent))]

        print "Lowval: ", low_val
        print "Highval: ", high_val

        # saturate below the low percentile and above the high percentile
        thresholded = apply_threshold(channel, low_val, high_val)
        # scale the channel
        normalized = cv2.normalize(thresholded, thresholded.copy(), 0, 255, cv2.NORM_MINMAX)
        out_channels.append(normalized)

    return cv2.merge(out_channels)
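
A hedged usage sketch for simplest_cb, assuming the same Python 2 environment as the snippet and a placeholder input path:

import cv2

# Hypothetical usage: clip the darkest and brightest 0.5% of each channel
# (percent=1 is split across both tails) and rescale to the full range.
img = cv2.imread('input.jpg')  # placeholder path
balanced = simplest_cb(img, 1)
cv2.imwrite('balanced.jpg', balanced)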
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def equalize_BGR_image(image):
    """ Histogram eq whole color image."""

    b, g, r = cv.split(image)
    b = equalize_image_channel(b)
    g = equalize_image_channel(g)
    r = equalize_image_channel(r)
    return cv.merge((b,g,r))
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def equalize_BGR_image_adaptive(image):
    """ Adaptive color image equalization (CLAHE)."""

    b, g, r = cv.split(image)
    b = equalize_image_channel_adaptive(b)
    g = equalize_image_channel_adaptive(g)
    r = equalize_image_channel_adaptive(r)
    return cv.merge((b,g,r))
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def change_light(image, value, channel="v"):
    """ Change the light intensity of an image."""

    channelDic = {"h": 0, "s":1, "v":2}
    # "translate" image channel to channel index
    if not channel in channelDic:
        raise AttributeError("invalid channel value. Valid values are h, s, or v")

    # which format? ConvNet (3, w, h) vs. normal (w, h, 3)
    reshape = False
    prevShape = image.shape

    if image.shape[0] == 3 or image.shape[0] == 1:
        reshape = True
        if image.shape[0] == image.shape[1] or (image.shape[0] == 1 and image.shape[1] == 3): # grayscale 1L, 1L, h, w OR color 1L, 3L, h, w
            reshapeVector = (image.shape[2], image.shape[3], image.shape[1])         
        else:                      
            reshapeVector = (image.shape[1], image.shape[2], image.shape[0])                    # single row color or grayscale 1L/3L, h, w
        image = image.reshape(reshapeVector)

    #print "Shape",image.shape
    #print "dtype",image.dtype
    # convert to hsv
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    # hsv[:,:,2] += value  - would be much faster, but it does not prevent overflow (a high value wraps around and becomes 0)
    channels = cv.split(hsv)
    for row in xrange(len(channels[channelDic[channel]])):
        for col in xrange(len(channels[channelDic[channel]][0])):
            channels[channelDic[channel]][row][col] = max(min(255, channels[channelDic[channel]][row][col]*value),0)

    image = cv.cvtColor(cv.merge(channels), cv.COLOR_HSV2BGR)

    # reshape back
    if reshape:        
        image = image.reshape(prevShape)
    return image