Python cv2 module: split() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.split().
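
Before the project examples, here is a minimal sketch of the API itself (the file name is hypothetical): cv2.split() copies each channel of a multi-channel image into its own single-channel array, and cv2.merge() reassembles them.

import cv2
import numpy as np

img = cv2.imread('example.jpg')   # hypothetical file; OpenCV loads color images in BGR order
b, g, r = cv2.split(img)          # three 2-D uint8 arrays, one per channel
assert b.shape == img.shape[:2]
restored = cv2.merge((b, g, r))   # reassembles the channels; identical to img
assert np.array_equal(restored, img)

When only a view is needed, plain NumPy slicing (img[:, :, 0]) is cheaper, because cv2.split() copies each channel.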

Project: PaperHelper    Author: EdgarNg1024    | Project source | File source
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
Project: beryl    Author: DanielJDufour    | Project source | File source
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        if (1 - (float(w) / float(h)) <= 0.07 and 1 - (float(h) / float(w)) <= 0.07):
                            squares.append(cnt)
    return squares
Project: fully-convolutional-network-semantic-segmentation    Author: alecng94    | Project source | File source
def enhance(image_path, clip_limit=3):
    image = cv2.imread(image_path)
    # convert image to LAB color model
    image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

    # split the image into L, A, and B channels
    l_channel, a_channel, b_channel = cv2.split(image_lab)

    # apply CLAHE to lightness channel
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
    cl = clahe.apply(l_channel)

    # merge the CLAHE enhanced L channel with the original A and B channels
    merged_channels = cv2.merge((cl, a_channel, b_channel))

    # convert image from LAB color model back to BGR color model
    final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
    return cv2_to_pil(final_image)
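
A hedged usage sketch (the path is hypothetical, and cv2_to_pil is assumed to be this project's OpenCV-to-PIL conversion helper, so the return value behaves like a PIL image):

enhanced = enhance('photo.jpg', clip_limit=2)
enhanced.save('photo_clahe.jpg')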
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def find_squares(img, cos_limit = 0.1):
    print('search for squares with threshold %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < cos_limit :
                        squares.append(cnt)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares

###
### Version V2. Collect meta-data along the way, with commentary added.
###
Project: robik    Author: RecunchoMaker    | Project source | File source
def get_color_medio(self, roi, a,b,imprimir = False):
        xl,yl,ch = roi.shape
        roiyuv = cv2.cvtColor(roi,cv2.COLOR_RGB2YUV)
        roihsv = cv2.cvtColor(roi,cv2.COLOR_RGB2HSV)
        h,s,v=cv2.split(roihsv)
        mask=(h<5)
        h[mask]=200

        roihsv = cv2.merge((h,s,v))
        std = np.std(roiyuv.reshape(xl*yl,3),axis=0)
        media = np.mean(roihsv.reshape(xl*yl,3), axis=0)-60
        mediayuv = np.mean(roiyuv.reshape(xl*yl,3), axis=0)

        if std[0]<12 and std[1]<12 and std[2]<12:
        #if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
            media = np.mean(roihsv.reshape(xl*yl,3), axis=0)
            # yellow has a saturation of 65 and around 200
            if media[1]<60: #and (abs(media[0]-30)>10):
                # white
                return [-10,0,0]
            else:
                return media
        else:
            return None
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def cannyThresholding(self, contour_retrieval_mode = cv2.RETR_LIST):
        '''
        contour_retrieval_mode is passed through as second argument to cv2.findContours
        '''

        # Attempt to match edges found in blue, green or red channels : collect all
        channel = 0
        for gray in cv2.split(self.img):
            channel += 1
            print('channel %d ' % channel)
            title = self.tgen.next('channel-%d' % channel)
            if self.show: ImageViewer(gray).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
            found = {}
            for thrs in xrange(0, 255, 26):
                print('Using threshold %d' % thrs)
                if thrs == 0:
                    print('First step')
                    bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                    title = self.tgen.next('canny-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                    bin = cv2.dilate(bin, None)
                    title = self.tgen.next('canny-dilate-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                else:
                    retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                    title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                    if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy = self.destroy, info = self.info, thumbnailfn = title)
                bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
                title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                    filteredContours = contours
                else:
                    filteredContours = []
                    h = hierarchy[0]
                    for component in zip(contours, h):
                        currentContour = component[0]
                        currentHierarchy = component[1]
                        if currentHierarchy[3] < 0:
                            # Found the outermost parent component
                            filteredContours.append(currentContour)
                    print('Contours filtered.   Input %d  Output %d' % (len(contours), len(filteredContours)))
                    time.sleep(5)
                for cnt in filteredContours:
                    cnt_len = cv2.arcLength(cnt, True)
                    cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                    cnt_len = len(cnt)
                    cnt_area = cv2.contourArea(cnt)
                    cnt_isConvex = cv2.isContourConvex(cnt)
                    if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max)  and cnt_isConvex:
                        cnt = cnt.reshape(-1, 2)
                        max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                        if max_cos < self.cos_limit :
                            sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                            self.squares.append(sq)
                        else:
                            #print('dropped a square with max_cos %f' % max_cos)
                            pass
                found[thrs] = len(self.squares)
                print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
Project: face-landmark    Author: lsy17096535    | Project source | File source
def DataRowFromAFW(anno, root=''): # Assume data coming from parsed anno-v7.mat file.
        name = str(anno[0][0])
        bbox = anno[1][0][0]
#        yaw, pitch, roll = anno[2][0][0][0]
        lm = anno[3][0][0]  # 6 landmarks

        if np.isnan(lm).any():
            return None  # Fail

        d = DataRow()
        d.path = os.path.join(root, name).replace("\\", "/")
        d.name = os.path.split(d.path)[-1]
        d.image = cv2.imread(d.path)


        d.leftEye = (float(lm[0][0]), float(lm[0][1]))
        d.rightEye = (float(lm[1][0]), float(lm[1][1]))
        d.middle = (float(lm[2][0]), float(lm[2][1]))
        d.leftMouth = (float(lm[3][0]), float(lm[3][1]))
        # skip point 4 middle mouth - We take 0 left eye, 1 right eye, 2 nose, 3 left mouth, 5 right mouth
        d.rightMouth = (float(lm[5][0]), float(lm[5][1]))


        return d
Project: Tesis-UIP    Author: ajlongart    | Project source | File source
def maxImagen(img, tamanyo):
    '''Estimate background levels for the blue and green channels from a sliding-window comparison of the red channel against blue+green.'''
    bOri, gOri, rOri = cv2.split(img)
    filas,columnas,canales = img.shape
    #pad_size = tamanyo/2
    #padded_max = np.pad(img, (pad_size, pad_size),'constant',constant_values=np.inf)
    max_channel = np.zeros((filas,columnas))
    for r in range(1,filas):
        for c in range(1,columnas):
            window_b = bOri[r:r+tamanyo,c:c+tamanyo]
            window_g = gOri[r:r+tamanyo,c:c+tamanyo]
            window_r = rOri[r:r+tamanyo,c:c+tamanyo]
            max_bg = np.max(window_b+window_g)
            max_r = np.max(window_r)
            max_ch = max_r-max_bg       #(max_r-max_bg)+np.absolute(np.min(max_r-max_bg))
            max_ch_array = np.array([max_ch])
            max_channel[r,c] = max_ch_array

    min_max_channel = np.min(max_channel)
    background_bOri = np.mean(bOri*min_max_channel)
    background_gOri = np.mean(gOri*min_max_channel)
    BbOri = np.absolute(background_bOri)
    BgOri = np.absolute(background_gOri)

    return BbOri, BgOri     #max_channel,
Project: python-image-processing    Author: karaage0703    | Project source | File source
def extract_color( src, h_th_low, h_th_up, s_th, v_th ):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY) 
        ret, h_dst_2 = cv2.threshold(h, h_th_up,  255, cv2.THRESH_BINARY_INV)

        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h,   h_th_low, 255, cv2.THRESH_TOZERO) 
        ret, dst = cv2.threshold(dst, h_th_up,  255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)

    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
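
A possible call (threshold values are illustrative): in OpenCV's 8-bit HSV encoding hue runs from 0 to 179, so red straddles the wrap-around point, which is exactly the case the h_th_low > h_th_up branch handles:

red_mask = extract_color(img, 170, 10, 64, 64)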
Project: retinal-exudates-detection    Author: getsanjeev    | Project source | File source
def writeResults(DestinationFolder,resultFolder,name_array,classifier,Y_predicted):
    print(len(name_array))
    if not os.path.exists(resultFolder):
        os.mkdir(resultFolder)
    size_m = 0
    i = 0
    j = 0
    lc = 0
    while size_m < len(name_array):
        current = cv2.imread(DestinationFolder+name_array[size_m]+"_final_candidates.bmp")
        #print(DestinationFolder+name_array[size_m]+"_final_candidates.bmp.bmp")        
        #print("current ka size",current.shape)
        x,current_m,z = cv2.split(current)
        #print(count_ones(current_m,255),"now again check",name_array[size_m])
        i = 0       
        while i < current_m.shape[0]:
            j = 0
            while j < current_m.shape[1]:
                if current_m[i,j] == 255:
                    current_m[i,j] = 255*Y_predicted[lc]
                    lc = lc + 1
                j = j + 1
            i = i + 1
        cv2.imwrite(resultFolder+name_array[size_m]+classifier+"_result.bmp",current_m)
        size_m = size_m + 1
Project: APEX    Author: ymollard    | Project source | File source
def draw(self, frame):
        #rgbs = cv2.split(frame)
        #hsvs = cv2.split(hsv)

        #cv2.imshow("Hue", hsvs[0])
        #cv2.imshow("Frame", frame)
        #cv2.waitKey(1)

        #cv2.imshow("Red", rgbs[0])
        #cv2.imshow("Green", rgbs[1])
        #cv2.imshow("Blue", rgbs[2])
        #cv2.imshow("Saturation", hsvs[1])
        #cv2.imshow("Value", hsvs[2])

        cv2.imshow(self.params['name'], frame)
        cv2.waitKey(1)
Project: APEX    Author: ymollard    | Project source | File source
def draw_images(self, frame, hsv, mask_ball, mask_arena, arena_center, arena_ring_radius=None):
        self.draw_history(frame, 'ball')
        self.draw_history(frame, 'arena')
        if arena_ring_radius is not None:
            cv2.circle(frame, arena_center, arena_ring_radius, (0, 128, 255), 2)
        return frame

        #rgbs = cv2.split(frame)
        #hsvs = cv2.split(hsv)

        #cv2.imshow("Hue", hsvs[0])
        #cv2.imshow("Mask ball", mask_ball)
        #cv2.imshow("Mask arena", mask_arena)
        #cv2.imshow("Frame", frame)
        #cv2.waitKey(1)

        #cv2.imshow("Red", rgbs[0])
        #cv2.imshow("Green", rgbs[1])
        #cv2.imshow("Blue", rgbs[2])
        #cv2.imshow("Saturation", hsvs[1])
        #cv2.imshow("Value", hsvs[2])
        #cv2.waitKey(1)
Project: unet-tensorflow    Author: timctho    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
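
One caveat with this widely copied augmentation: cv2.add() saturates uint8 arrays, so a large hue shift clips at the channel maximum instead of wrapping around the hue circle (0 to 179 in OpenCV's 8-bit HSV). A hedged sketch of a wrapping variant for the hue update:

h = ((h.astype(np.int32) + int(hue_shift)) % 180).astype(np.uint8)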
Project: unet-tensorflow    Author: timctho    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: eclipse2017    Author: google    | Project source | File source
def get_rescaled(fname, metadata, directory, rescaled_directory):
    # TODO(dek): move rescaling to its own function
    rescaled_fname = fname + ".rescaled.png"
    rescaled = os.path.join(rescaled_directory, rescaled_fname)
    if not os.path.exists(rescaled):
        print "Unable to find cached rescaled image for", fname
        return None
    image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
    if image is None:
        print "Failed to read image from", rescaled
        return None
    b_channel, g_channel, r_channel = cv2.split(image)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

    return image
Project: colorcs    Author: ch3njust1n    | Project source | File source
def load(self):
        if self.path is None:
            print "Current path is empty!"
            print "Please set one!"
        else:
            try:
                # Return a 3-channel color image
                self.image = cv2.imread(self.path)
                # cv2.imshow('f', self.image)
                # cv2.waitKey(0)
                # cv2.destroyAllWindows()
            except:
                raise ValueError('Loading error!')

        # convert BGR to HSV
        self.hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)

        # split image into HSV channels
        self.h, self.s, self.v = cv2.split(self.hsv)

    # Apply Gaussian noise and save
Project: Farmbot_GeneralAP    Author: SpongeYao    | Project source | File source
def extractPlantsArea(self, arg_mode=0,arg_INV= False, b_threshold=80, a_threshold=80):
        zeros = np.zeros(self.image.shape[:2], dtype = "uint8")

        imgLAB = cv2.cvtColor(self.image, self.colorSpace)
        (L, A, B) = cv2.split(imgLAB)
        cv2.imwrite('Debug/imgB.jpg',B)
        cv2.imwrite('Debug/imgA.jpg',A)
        #(T_weeds_b, thresh_weeds_b) = cv2.threshold(B, b_threshold, 255, cv2.THRESH_BINARY)
        #(T_weeds_a, thresh_weeds_a) = cv2.threshold(A, a_threshold, 255, cv2.THRESH_BINARY)
        if arg_mode==0:
            thresh_weeds_a= imgProcess_tool.binarialization(A,0,arg_INV, a_threshold)
            thresh_weeds_b= imgProcess_tool.binarialization(B,0,arg_INV, b_threshold)
        elif arg_mode==1:
            thresh_weeds_b= imgProcess_tool.binarialization(B, 1, arg_INV)
            thresh_weeds_a= imgProcess_tool.binarialization(A, 1, arg_INV)
        elif arg_mode==2:
            thresh_weeds_b= imgProcess_tool.binarialization(B, 2, arg_INV)
            thresh_weeds_a= imgProcess_tool.binarialization(A, 2, arg_INV)
        cv2.imwrite('Debug/imgB_thr.jpg',thresh_weeds_b)
        cv2.imwrite('Debug/imgA_thr.jpg',thresh_weeds_a)
        imgRGB = cv2.merge([zeros, thresh_weeds_b, thresh_weeds_a])
        return thresh_weeds_a, thresh_weeds_b
Project: test-automation    Author: openstax    | Project source | File source
def mean_squares(self):
        self.threshold = 0.00000000001        
        b_i,g_i,r_i = cv2.split(self.image_i)
        b_j,g_j,r_j = cv2.split(self.image_j)

        # cast to float first; subtracting uint8 arrays directly would wrap around
        error_b = b_i.astype(numpy.float64) - b_j.astype(numpy.float64)
        error_g = g_i.astype(numpy.float64) - g_j.astype(numpy.float64)
        error_r = r_i.astype(numpy.float64) - r_j.astype(numpy.float64)

        error_b = error_b.flatten()
        error_g = error_g.flatten()
        error_r = error_r.flatten()

        mse_b = float(numpy.dot(error_b,error_b))/len(error_b) 
        mse_g = float(numpy.dot(error_g,error_g))/len(error_g)

        mse_r = float(numpy.dot(error_r,error_r))/len(error_r)
        self.measure = (mse_b + mse_g + mse_r)/3
        self.assertLess(self.measure, self.threshold)
        print self.measure
Project: test-automation    Author: openstax    | Project source | File source
def mean_squares(self):
        self.threshold = 0.00000000001        
        b_i,g_i,r_i = cv2.split(self.image_i)
        b_j,g_j,r_j = cv2.split(self.image_j)

        # cast to float first; subtracting uint8 arrays directly would wrap around
        error_b = b_i.astype(numpy.float64) - b_j.astype(numpy.float64)
        error_g = g_i.astype(numpy.float64) - g_j.astype(numpy.float64)
        error_r = r_i.astype(numpy.float64) - r_j.astype(numpy.float64)

        error_b = error_b.flatten()
        error_g = error_g.flatten()
        error_r = error_r.flatten()

        mse_b = float(numpy.dot(error_b,error_b))/len(error_b) 
        mse_g = float(numpy.dot(error_g,error_g))/len(error_g)

        mse_r = float(numpy.dot(error_r,error_r))/len(error_r)
        self.measure = (mse_b + mse_g + mse_r)/3
        self.assertLess(self.measure, self.threshold)
Project: head-segmentation    Author: szywind    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: head-segmentation    Author: szywind    | Project source | File source
def main():
    imgList = getImageList(input_folder='/home/jin/shenzhenyuan/head-segmentation/input/test',
                           output_file='/home/jin/shenzhenyuan/head-segmentation/input/testSet.txt')
    for img_path in imgList:
        img = cv2.imread('{}'.format(img_path))
        if img_path[:img_path.rfind('.')].endswith('png'):
            str = img_path[:img_path.rfind('.')] + '-seg.png'
        else:
            str = img_path[:img_path.rfind('.')] + '.png-seg.png'
        mask = cv2.imread('{}'.format(str))
        prob = mask[:,:,0:2] / 255.0
        prob[:, :, 1] = 1 - prob[:, :, 0]
        res, Q = denseCRF(img, prob)
        a = 1-res
        a = a.astype('uint8')

        r_channel, g_channel, b_channel = cv2.split(img)
        img_rgba = cv2.merge((r_channel, g_channel, b_channel, a*255))
        cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), img_rgba)

        # a = np.dstack((a,)*3)
        # plt.imshow(a*img)
        # cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), (a>0.1)*img)

        cv2.imwrite('{}_crf_qtsu.png'.format(img_path[:img_path.find('.')]), cropHead(Q, img))
Project: Bayesian-FlowNet    Author: Johswald    | Project source | File source
def flows_to_img(flows):
    """Pyfunc wrapper to transorm flow vectors in color coding"""

    def _flow_transform(flows):
        """ Tensorflow Pyfunc to transorm flow to color coding"""

        flow_imgs = []
        for flow in flows:
            img = computeColor.computeImg(flow)
            # cv2 returns bgr images
            b, g, r = cv2.split(img)
            img = cv2.merge((r, g, b))
            flow_imgs.append(img)
        return [flow_imgs]

    flow_imgs = tf.py_func(_flow_transform, [flows],
                           [tf.uint8], stateful=False, name='flow_transform')

    flow_imgs = tf.squeeze(tf.stack(flow_imgs))
    flow_imgs.set_shape([FLAGS.batchsize] + FLAGS.d_shape_img)
    return flow_imgs
Project: Kaggle-Carvana-Image-Masking-Challenge    Author: petrosgk    | Project source | File source
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
Project: 2017-Vision    Author: RoboticsTeam4904    | Project source | File source
def test():
    displace()
    start = time.clock()
    newCalibrate()
    exposure = WebCam.getExposure()
    print time.clock() - start, "TOTAL TIME"

    while display:
        image = WebCam.getImage()
        contours = GripRunner.run(image)
        Printing.drawContours(image, contours)
        Printing.display(image)
        cv2.waitKey(20)

# Get average value at the end of test to recalibrate targetAverage
    # image = cv2.imread('TestImages/Cancer.jpg')
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # value = cv2.split(image)[2]
    # # value = np.array([image[:,:,2]])
    # average = cv2.mean(value)
    # print average
Project: BlindWaterMark    Author: chishaxie    | Project source | File source
def bgr_to_rgb(img):
    b, g, r = cv2.split(img)
    return cv2.merge([r, g, b])
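
For comparison, the same BGR-to-RGB swap can be done in a single call, which avoids the per-channel copies that cv2.split() makes:

rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)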
Project: Vision2016    Author: Team3309    | Project source | File source
def hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max):
    """
    Threshold an HSV image given separate min/max values for each channel.
    :param img: an hsv image
    :param hue_min:
    :param hue_max:
    :param sat_min:
    :param sat_max:
    :param val_min:
    :param val_max:
    :return: result of the threshold (each binary channel AND'ed together)
    """

    hue, sat, val = cv2.split(img)

    hue_bin = np.zeros(hue.shape, dtype=np.uint8)
    sat_bin = np.zeros(sat.shape, dtype=np.uint8)
    val_bin = np.zeros(val.shape, dtype=np.uint8)

    cv2.inRange(hue, hue_min, hue_max, hue_bin)
    cv2.inRange(sat, sat_min, sat_max, sat_bin)
    cv2.inRange(val, val_min, val_max, val_bin)

    bin = np.copy(hue_bin)
    cv2.bitwise_and(sat_bin, bin, bin)
    cv2.bitwise_and(val_bin, bin, bin)

    return bin
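
A possible usage sketch (threshold values are illustrative, roughly targeting green): the function expects an image already converted to HSV, so convert from BGR first:

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = hsv_threshold(hsv, 40, 80, 100, 255, 60, 255)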
Project: watermark    Author: lishuaijuly    | Project source | File source
def extract(self,ori_wmimage,wm,key=10):
        B = ori_wmimage
        if len(ori_wmimage.shape ) > 2 :
            (B,G,R) = cv2.split(cv2.cvtColor(ori_wmimage, cv2.COLOR_BGR2YUV))

        signature = BlindWatermark._gene_signature(wm,256,key).flatten()

        ext_sig = self.inner_extract(B,signature)
        return BlindWatermark.calc_sim(signature,ext_sig)
Project: masks-and-hats    Author: leoneckert    | Project source | File source
def add_alpha_channel(img):
    # img = cv2.imread(path)
    b_channel, g_channel, r_channel = cv2.split(img)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255 #creating a dummy alpha channel image.
    return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
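
For reference, OpenCV can append the alpha plane in a single conversion call; for 8-bit input it fills the new channel with the type maximum (255), matching the split/merge above:

bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)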
Project: masks-and-hats    Author: leoneckert    | Project source | File source
def assert_dir(dir_path):
    potential_out_dir = dir_path
    idx = -1
    while os.path.isdir(potential_out_dir):
        idx += 1
        if idx == 0:
            potential_out_dir += "_0"
            continue
        potential_out_dir = "_".join( potential_out_dir.split("_")[:-1] ) +  "_" + str(idx)  
    out_dir = potential_out_dir
    os.mkdir(out_dir)
    print "[+] Created " + out_dir + ". and will save output to that directory" 
    return out_dir
Project: reconstruction    Author: microelly2    | Project source | File source
def run_pathanalyzer(obj,app=None,forcereload=True,createFC=False):
    ''' split path into intervals '''

    import reconstruction.pathanalyser
    if forcereload: reload(reconstruction.pathanalyser)

    if app != None:
        FreeCAD.app=app

    try: obj.Proxy.pl2
    except:
        sayexc("no data - run FindPathes first")
        errorDialog("no data - run FindPathes first")
        return

    try: widget=obj.Proxy.analyzer
    except: widget=None

    hideApprox=obj.hideApproximation

    if obj.pathSelection: # process selected path
        analyzer=reconstruction.pathanalyser.runsel(obj.N,obj.Threshold,widget,createFC,obj)
    elif obj.pathId==-1: # process pathObject
        analyzer=reconstruction.pathanalyser.runobj(obj.pathObject,obj.N,obj.Threshold,widget,createFC,obj)
    else: # process object by index number
        analyzer=reconstruction.pathanalyser.run(obj.Proxy.pl2,obj.pathId,obj.N,obj.Threshold,widget,createFC,obj)

    obj.Proxy.analyzer=analyzer
    return
Project: reconstruction    Author: microelly2    | Project source | File source
def execute_Color(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    b,g,r = cv2.split(img)

    if obj.red:
        obj.Proxy.img = cv2.cvtColor(r, cv2.COLOR_GRAY2RGB)
    if obj.blue:
        obj.Proxy.img = cv2.cvtColor(b, cv2.COLOR_GRAY2RGB)
    if obj.green:
        obj.Proxy.img = cv2.cvtColor(255-g, cv2.COLOR_GRAY2RGB)
Project: reconstruction    Author: microelly2    | Project source | File source
def animpingpong(self):
        obj=self.Object

        res=None
        for t in obj.OutList:
            print t.Label
            img=t.ViewObject.Proxy.img.copy()
            if res==None:
                res=img.copy()
            else:
                #rr=cv2.subtract(res,img)
                #rr=cv2.add(res,img)

                aw=0.0+float(obj.aWeight)/100
                bw=0.0+float(obj.bWeight)/100
                print aw
                print bw
                if obj.aInverse:
                    # remap b
                    ret, mask = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY)
                    img=cv2.bitwise_not(mask)
                rr=cv2.addWeighted(res,aw,img,bw,0)
                res=rr
        #b,g,r = cv2.split(res)
        cv2.imshow(obj.Label,res)
        #cv2.imshow(obj.Label +" b",b)
        #cv2.imshow(obj.Label + " g",g)
        #cv2.imshow(obj.Label + " r",r)

        res=img

        if not obj.matplotlib:
            cv2.imshow(obj.Label,img)
        else:
            from matplotlib import pyplot as plt
            # plt.subplot(121),
            plt.imshow(img,cmap = 'gray')
            plt.title(obj.Label), plt.xticks([]), plt.yticks([])
            plt.show()

        self.img=img
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def binaryContoursNestingFilterHeuristic(img, cnts, *args, **kwargs):
    '''
    Concept  : Use the found contours, with binary drawn contours to extract hierarchy and hence filter on nesting.
    Critique : WIP
    '''
    # Set the image to black (0): 
    img[:,:] = (0,0,0)
    # Draw all of the contours on the image in white
    contours = [c.contour for c in cnts]
    cv2.drawContours( img, contours, -1, (255, 255, 255), 1 )
    iv = ImageViewer(img)
    iv.windowShow()
    # Now extract any channel
    gray = cv2.split(img)[0]
    iv = ImageViewer(gray)
    iv.windowShow()
    retval, bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Now find the contours again, but this time we care about hierarchy (hence _TREE) - we get back next, previous, first_child, parent
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Alternative flags : only take the external contours
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    return cnts
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def comment(self, msg):
        if type(msg) == type(''):
            self.fh.writelines('%s\n' % msg)
        else:
            lines = '%s' % msg
            newlines = []
            for l in lines.split('\n'):
                newlines.append('\t%s' % l)
            self.fh.writelines('\n'.join(newlines))
        print('%s' % msg)
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def addFrame(self, frame, width=300):
        frame = imutils.resize(frame, width)

        # check if the writer is None
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w * 2, self.h * 2), True)
            self.zeros = np.zeros((self.h, self.w), dtype="uint8")

        # break the image into its RGB components, then construct the
        # RGB representation of each frame individually
        (B, G, R) = cv2.split(frame)
        R = cv2.merge([self.zeros, self.zeros, R])
        G = cv2.merge([self.zeros, G, self.zeros])
        B = cv2.merge([B, self.zeros, self.zeros])

        # construct the final output frame, storing the original frame
        # at the top-left, the red channel in the top-right, the green
        # channel in the bottom-right, and the blue channel in the
        # bottom-left
        output = np.zeros((self.h * 2, self.w * 2, 3), dtype="uint8")
        output[0:self.h, 0:self.w] = frame
        output[0:self.h, self.w:self.w * 2] = R
        output[self.h:self.h * 2, self.w:self.w * 2] = G
        output[self.h:self.h * 2, 0:self.w] = B

        # write the output frame to file
        self.writer.write(output)
Project: imgpedia    Author: scferrada    | Project source | File source
def compute(self, img):
        averages = np.zeros((self.rows,self.cols,3))
        imgH, imgW, _ = img.shape
        for row in range(self.rows):
            for col in range(self.cols):
                slice = img[imgH/self.rows * row: imgH/self.rows * (row+1), imgW/self.cols*col : imgW/self.cols*(col+1)]
                average_color_per_row = np.mean(slice, axis=0)
                average_color = np.mean(average_color_per_row, axis=0)
                average_color = np.uint8(average_color)
                averages[row][col][0] = average_color[0]
                averages[row][col][1] = average_color[1]
                averages[row][col][2] = average_color[2]
        icon = cv2.cvtColor(np.array(averages, dtype=np.uint8), cv2.COLOR_BGR2YCR_CB)
        y, cr, cb = cv2.split(icon)
        dct_y = cv2.dct(np.float32(y))
        dct_cb = cv2.dct(np.float32(cb))
        dct_cr = cv2.dct(np.float32(cr))
        dct_y_zigzag = []
        dct_cb_zigzag = []
        dct_cr_zigzag = []
        flip = True
        flipped_dct_y = np.fliplr(dct_y)
        flipped_dct_cb = np.fliplr(dct_cb)
        flipped_dct_cr = np.fliplr(dct_cr)
        for i in range(self.rows + self.cols -1):
            k_diag = self.rows - 1 - i
            diag_y = np.diag(flipped_dct_y, k=k_diag)
            diag_cb = np.diag(flipped_dct_cb, k=k_diag)
            diag_cr = np.diag(flipped_dct_cr, k=k_diag)
            if flip:
                diag_y = diag_y[::-1]
                diag_cb = diag_cb[::-1]
                diag_cr = diag_cr[::-1]
            dct_y_zigzag.append(diag_y)
            dct_cb_zigzag.append(diag_cb)
            dct_cr_zigzag.append(diag_cr)
            flip = not flip
        return np.concatenate([np.concatenate(dct_y_zigzag), np.concatenate(dct_cb_zigzag), np.concatenate(dct_cr_zigzag)])
Project: ATX    Author: NetEaseGame    | Project source | File source
def test_detect():
    dev = AndroidDeviceMinicap()
    dev._adb.start_minitouch()
    time.sleep(3)

    d = SceneDetector('txxscene')
    old, new = None, None
    while True:
        # time.sleep(0.3)
        screen = dev.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w/2, h/2))

        # find hsv
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        _, _, V = cv2.split(hsv)
        V[V<150] = 0
        cv2.imshow('V', V)
        _, _, L = cv2.split(hls)
        L[L<150] = 0
        cv2.imshow('H', L)

        tic = time.clock()
        new = str(d.detect(img))
        t = time.clock() - tic
        if new != old:
            print 'change to', new
            print 'cost time', t
        old = new

        for _, r in d.current_scene:
            x, y, x1, y1 = r
            cv2.rectangle(img, (x,y), (x1,y1), (0,255,0) ,2)
        cv2.imshow('test', img)
        cv2.waitKey(1)
Project: ATX    Author: NetEaseGame    | Project source | File source
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
Project: tensorflow-pi    Author: karaage0703    | Project source | File source
def equalizeHistRGB(src):

    RGB = cv2.split(src)
    Blue   = RGB[0]
    Green = RGB[1]
    Red    = RGB[2]
    for i in range(3):
        RGB[i] = cv2.equalizeHist(RGB[i])  # equalizeHist returns a new array; assign it back

    img_hist = cv2.merge([RGB[0],RGB[1], RGB[2]])
    return img_hist
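
Equalizing the B, G, and R channels independently can shift the image's colors, because each histogram is stretched on its own. A common alternative, sketched here for 8-bit BGR input, equalizes only the luma channel in YCrCb space:

ycrcb = cv2.cvtColor(src, cv2.COLOR_BGR2YCrCb)
y, cr, cb = cv2.split(ycrcb)
y = cv2.equalizeHist(y)
dst = cv2.cvtColor(cv2.merge([y, cr, cb]), cv2.COLOR_YCrCb2BGR)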

Project: RealtimeFacialEmotionRecognition    Author: sushant3095    | Project source | File source
def rgb(bgr_img):
    b,g,r = cv.split(bgr_img)       # get b,g,r
    rgb_img = cv.merge([r,g,b])     # switch it to rgb
    return rgb_img

# Given directory loc, get all images in directory and crop to just faces
# Returns face_list, an array of cropped image file names
Project: RealtimeFacialEmotionRecognition    Author: sushant3095    | Project source | File source
def toggleRGB(img):
  r,g,b = cv.split(img)
  img = cv.merge([b,g,r])
  return img

# Combine two images for displaying side-by-side
# If maxSize is true, crops sides of image to keep under 2880 pixels in width
Project: cvloop    Author: shoeffner    | Project source | File source
def load_hat(self, path):  # pylint: disable=no-self-use
        """Loads the hat from a picture at path.

        Args:
            path: The path to load from

        Returns:
            The hat data.
        """
        hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if hat is None:
            raise ValueError('No hat image found at `{}`'.format(path))
        b, g, r, a = cv2.split(hat)
        return cv2.merge((r, g, b, a))
Project: face-landmark    Author: lsy17096535    | Project source | File source
def predict(self, resized):
        """
        @resized: 60x60 image, already preprocessed
        """         
        #self.net.blobs['data'].data[...] = cv2.split(resized)
        self.net.blobs['data'].data[...] = resized.reshape(1,1,60,60)
        prediction = self.net.forward()['Dense3'][0]
        return prediction
Project: pycolor_detection    Author: parth1993    | Project source | File source
def removebg(segmented_img):
    src = cv2.imdecode(np.squeeze(np.asarray(segmented_img[1])), 1)
    tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(src)
    rgba = [b, g, r, alpha]
    dst = cv2.merge(rgba, 4)
    processed_img = cv2.imencode('.png', dst)

    return processed_img
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def test_detect():
    dev = AndroidDeviceMinicap()
    dev._adb.start_minitouch()
    time.sleep(3)

    d = SceneDetector('txxscene')
    old, new = None, None
    while True:
        # time.sleep(0.3)
        screen = dev.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w/2, h/2))

        # find hsv
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        _, _, V = cv2.split(hsv)
        V[V<150] = 0
        cv2.imshow('V', V)
        _, _, L = cv2.split(hls)
        L[L<150] = 0
        cv2.imshow('H', L)

        tic = time.clock()
        new = str(d.detect(img))
        t = time.clock() - tic
        if new != old:
            print 'change to', new
            print 'cost time', t
        old = new

        for _, r in d.current_scene:
            x, y, x1, y1 = r
            cv2.rectangle(img, (x,y), (x1,y1), (0,255,0) ,2)
        cv2.imshow('test', img)
        cv2.waitKey(1)
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
Project: traffic-light-detection    Author: ranveeraggarwal    | Project source | File source
def extract_features():
    pos_img_path = positive_images_path
    neg_img_path = negative_images_path

    pos_feat_path = positive_features_path
    neg_feat_path = negative_features_path

    if not os.path.isdir(pos_feat_path):
        os.makedirs(pos_feat_path)

    if not os.path.isdir(neg_feat_path):
        os.makedirs(neg_feat_path)

    print "Extracting positive features"
    progress = 0.0
    for im_path in glob.glob(os.path.join(pos_img_path, "*")):
        im = imread(im_path)
        im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
        im = cv2.split(im_ycbcr)[0]
        feature_vector = hog(image=im, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)
        feature_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        feature_path = os.path.join(pos_feat_path, feature_name)
        joblib.dump(feature_vector, feature_path)
        progress += 1.0
        update_progress(progress/float(len(glob.glob(os.path.join(pos_img_path, "*")))))

    print "Extracting negative features"
    progress = 0.0
    for im_path in glob.glob(os.path.join(neg_img_path, "*")):
        im = imread(im_path)
        im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
        im = cv2.split(im_ycbcr)[0]
        feature_vector = hog(image=im, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)
        feature_name = os.path.split(im_path)[1].split(".")[0] + ".feat"
        feature_path = os.path.join(neg_feat_path, feature_name)
        joblib.dump(feature_vector, feature_path)
        progress += 1.0
        update_progress(progress/float(len(glob.glob(os.path.join(neg_img_path, "*")))))
Project: traffic-light-detection    Author: ranveeraggarwal    | Project source | File source
def get_regions(roi_path):
    f = open(roi_path, 'r').read()
    f = f.split("\n")
    f = [i.split() for i in f]
    return f
Project: traffic-light-detection    Author: ranveeraggarwal    | Project source | File source
def test_classifier(img_path, roi_path):
    model_path = classifier_model_path
    # Load the classifier
    clf = joblib.load(model_path)

    max_win_y = 171
    max_win_x = 70

    detections = []

    regions = get_regions(roi_path)

    im = imread(img_path)
    im_ycbcr = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB)
    im = cv2.split(im_ycbcr)[0]

    for region in regions:
        x = int(float(region[0])*1000)
        y = int(float(region[1])*1000)

        im_window = im[y: y + max_win_y, x: x + max_win_x]

        fd = hog(image=im_window, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), visualise=False)

        if len(fd) == 9234:
            prediction = clf.predict(fd.reshape(1, -1))

            if prediction == 1:
                print "Detection:: Location -> ({}, {})".format(x, y)
                print "Confidence Score {} \n".format(clf.decision_function(fd))
                detections.append((x, y, clf.decision_function(fd)))

    im = imread(img_path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

    for (x_tl, y_tl, _) in detections:
        cv2.rectangle(im, (x_tl, y_tl), (x_tl+max_win_x, y_tl+max_win_y), (0, 255, 0), thickness=1)
    cv2.imwrite("result.png", im)
Project: traffic-light-detection    Author: ranveeraggarwal    | Project source | File source
def gen_neg():
    progress = 0.0
    cropped_images = []
    for i in range(9000):
        frame_number = str(random.randint(0, 11178))
        frame = 'frame_' + '0'*(6-len(frame_number)) + frame_number + '.jpg'
        img = cv2.imread("../lara_data/images/" + frame)
        height, width = img.shape[:2]
        x = random.randint(max_window_size[0], width - max_window_size[0])
        y = random.randint(max_window_size[1], height - max_window_size[1])
        up_limit = y - max_window_size[1]/2
        down_limit = y + max_window_size[1]/2
        left_limit = x - max_window_size[0]/2
        right_limit = x + max_window_size[0]/2
        cropped_img = img[up_limit: down_limit, left_limit: right_limit]
        h, w = cropped_img.shape[:2]
        if int(w) == int(max_window_size[0]) and int(h) == int(max_window_size[1]):
            cropped_images.append(cropped_img)
        progress += 1.0
        update_progress(progress/float(9000))

    print("Generating Negative Images")

    progress = 0.0

    i = 0
    for cropped_image in cropped_images:
        # out_image = cv2.cvtColor(cropped_image, cv2.COLOR_RGB2YCR_CB)
        # out_image = cv2.split(out_image)[0]
        out_image = cropped_image
        image_name = "neg" + str(i) + ".ppm"
        image_path = os.path.join(neg_img_path, image_name)
        cv2.imwrite(image_path, out_image)
        progress += 1.0
        i += 1
        update_progress(progress/float(len(cropped_images)))