Python cv2 module, IMREAD_GRAYSCALE example source code

The following code examples, extracted from open-source Python projects, illustrate how to use cv2.IMREAD_GRAYSCALE.
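
Before the project examples, here is a minimal, self-contained sketch of the flag itself (the file name example.png is a placeholder, not taken from any project below): passing cv2.IMREAD_GRAYSCALE to cv2.imread yields a single-channel uint8 array, and cv2.imread returns None rather than raising an error when the file cannot be read.

import cv2

# Load the same (placeholder) file in grayscale and in color for comparison.
gray = cv2.imread('example.png', cv2.IMREAD_GRAYSCALE)   # 2-D array of shape (H, W), dtype uint8
color = cv2.imread('example.png', cv2.IMREAD_COLOR)      # 3-D array of shape (H, W, 3), BGR channel order

if gray is None:
    # cv2.imread signals failure by returning None instead of raising an exception
    raise IOError('could not read example.png')

print(gray.shape, gray.dtype)    # e.g. (480, 640) uint8
print(color.shape)               # e.g. (480, 640, 3)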

Project: illumeme    Author: josmcg    | Project source | File source
def find_triangles(filename):
    FIRST = 0
    RED = (0, 0, 255)
    THICKNESS = 3
    copy = img = cv2.imread(filename)
    grey_img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    ret, thresh = cv2.threshold(grey_img, 127, 255, 1)
    contours, h = cv2.findContours(thresh, 1, 2)
    largest = None
    for contour in contours:
        approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
        if len(approx) == 3:
            #triangle found
            if largest is None or cv2.contourArea(contour) > cv2.contourArea(largest):
                largest = contour

    #write file
    cv2.drawContours(copy, [largest], FIRST, RED, THICKNESS)
    cv2.imwrite(filename +"_result", copy)
Project: WeiQiRecognition    Author: JDython    | Project source | File source
def lineRecognizer(path):
    '''
    :param path: path to the input image
    :returns: lines_data -- lines detected by the Hough transform; resize_pic -- the image used for edge detection
    '''
    img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
    resize_pic=img
    #resize_pic=cv2.resize(img,(640,480),interpolation=cv2.INTER_CUBIC)
    edges = cv2.Canny(resize_pic,50,150)
    lines_data = cv2.HoughLines(edges,1,np.pi/180,150)
    return lines_data,resize_pic
Project: garden.facelock    Author: kivy-garden    | Project source | File source
def store_raw_images():
    '''To download images from image-net
        (Change the url for different needs of cascades)
    '''
    neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'
    neg_image_urls = urllib2.urlopen(neg_images_link).read().decode()

    pic_num = 1

    for i in neg_image_urls.split('\n'):
        try:

            print i
            urllib.urlretrieve(i, "neg/" + str(pic_num) + '.jpg')
            img = cv2.imread("neg/" + str(pic_num) +'.jpg',
                                cv2.IMREAD_GRAYSCALE)
            resized_image = cv2.resize(img, (100, 100))
            cv2.imwrite("neg/" + str(pic_num) + '.jpg', resized_image)
            pic_num = pic_num + 1

        except:
            print "error"
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def GetFeature(image_path):
    #MinBlackRate, left_most_pixel_gradiant,  hill_number, average_hill_peak, average_hill_valley, BlackRate
    boundary_path = image_path.split(".")[0]+"_upper_boundary.txt"
    file = open(boundary_path)
    tmp_str = file.readline().strip()
    tmp_arr = tmp_str.split(" ")
    boundary = []
    for i in range(len(tmp_arr)):
        if tmp_arr[i]!="":
            boundary.append(int(tmp_arr[i]))
    boundary = np.array(boundary)
    file.close()
    image = cv2.imread(image_path,cv2.IMREAD_GRAYSCALE)
    image = CropLowerBoundary(image)

    feature = MinGridBlackRate(image,boundary)+BlackRate(image,boundary)
    flag,tmp_feature = CountHill(boundary,image)
    if flag==False:
        return [False,feature]
    feature += tmp_feature
    return [True,feature]
Project: TC-Lung_nodules_detection    Author: Shicoder    | Project source | File source
def load_cube_img(src_path, rows, cols, size):
    img = cv2.imread(src_path, cv2.IMREAD_GRAYSCALE)
    # assert rows * size == cube_img.shape[0]
    # assert cols * size == cube_img.shape[1]
    res = numpy.zeros((rows * cols, size, size))

    img_height = size
    img_width = size

    for row in range(rows):
        for col in range(cols):
            src_y = row * img_height
            src_x = col * img_width
            res[row * cols + col] = img[src_y:src_y + img_height, src_x:src_x + img_width]

    return res
Project: rosreestr2coord    Author: rendrom    | Project source | File source
def get_image_xy_corner(self):
        """get ?artesian coordinates from raster"""
        import cv2

        if not self.image_path:
            return False
        image_xy_corners = []
        img = cv2.imread(self.image_path, cv2.IMREAD_GRAYSCALE)
        imagem = (255 - img)

        try:
            ret, thresh = cv2.threshold(imagem, 10, 128, cv2.THRESH_BINARY)
            try:
                contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            except Exception:
                im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

            hierarchy = hierarchy[0]
            hierarhy_contours = [[] for _ in range(len(hierarchy))]
            for fry in range(len(contours)):
                currentContour = contours[fry]
                currentHierarchy = hierarchy[fry]
                cc = []
                # epsilon = 0.0005 * cv2.arcLength(contours[len(contours) - 1], True)
                approx = cv2.approxPolyDP(currentContour, self.epsilon, True)
                if len(approx) > 2:
                    for c in approx:
                        cc.append([c[0][0], c[0][1]])
                    parent_index = currentHierarchy[3]
                    index = fry if parent_index < 0 else parent_index
                    hierarhy_contours[index].append(cc)

            image_xy_corners = [c for c in hierarhy_contours if len(c) > 0]
            return image_xy_corners
        except Exception as ex:
            self.error(ex)
        return image_xy_corners
Project: unet-tensorflow    Author: timctho    | Project source | File source
def valid_generator():
    while True:
        for start in range(0, len(ids_valid_split), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(ids_valid_split))
            ids_valid_batch = ids_valid_split[start:end]
            for id in ids_valid_batch.values:
                img = cv2.imread('D:\Datasets_HDD\Carvana\\train\\{}.jpg'.format(id))
                img = cv2.resize(img, (input_size, input_size))
                mask = cv2.imread('D:\Datasets_HDD\Carvana\\output_masks\\{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                mask = cv2.resize(mask, (input_size, input_size))
                mask = np.expand_dims(mask, axis=2)
                x_batch.append(img)
                y_batch.append(mask)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
Project: SketchSimplification    Author: La4La    | Project source | File source
def read_img(path, s_size):
    image1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    if image1.shape[0] < image1.shape[1]:
        s0 = s_size
        s1 = int(image1.shape[1] * (s_size / image1.shape[0]))
        s1 = s1 - s1 % 16
    else:
        s1 = s_size
        s0 = int(image1.shape[0] * (s_size / image1.shape[1]))
        s0 = s0 - s0 % 16

    image1 = np.asarray(image1, np.float32)
    image1 = cv2.resize(image1, (s1, s0), interpolation=cv2.INTER_AREA)

    if image1.ndim == 2:
        image1 = image1[:, :, np.newaxis]

    return image1.transpose(2, 0, 1), False
Project: ultrasound-nerve-segmentation    Author: EdwardTyantov    | Project source | File source
def create_test_data():
    train_data_path = os.path.join(data_path, 'test')
    images = os.listdir(train_data_path)
    total = len(images)

    imgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
    imgs_id = np.ndarray((total, ), dtype=np.int32)

    i = 0
    print('Creating test images...')
    for image_name in images:
        img_id = int(image_name.split('.')[0])
        img = cv2.imread(os.path.join(train_data_path, image_name), cv2.IMREAD_GRAYSCALE)

        imgs[i, 0] = img
        imgs_id[i] = img_id

        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, total))
        i += 1
    print('Loading done.')

    np.save(img_test_path, imgs)
    np.save(img_test_id_path, imgs_id)
    print('Saving to .npy files done.')
Project: Nerve-Segmentation    Author: matthewzhou    | Project source | File source
def find_best_mask():
    #adjust file path for raw data directory
    files = glob.glob(os.path.join("/Users/matthewzhou/Desktop/Nerve/P5_Submission_Folder/", "raw", "trainsample", "*_mask.tif"))
    overall_mask = cv2.imread(files[0], cv2.IMREAD_GRAYSCALE)
    overall_mask.fill(0)
    overall_mask = overall_mask.astype(np.float32)

    for fl in files:
        mask = cv2.imread(fl, cv2.IMREAD_GRAYSCALE)
        overall_mask += mask
    overall_mask /= 255
    max_value = overall_mask.max()
    koeff = 0.5
    #if the overall_mask pixel value is below koeff * max_value, zero it; otherwise set it to 255
    overall_mask[overall_mask < koeff * max_value] = 0
    overall_mask[overall_mask >= koeff * max_value] = 255
    overall_mask = overall_mask.astype(np.uint8)
    return overall_mask
Project: faceRecognitionforRaspPi    Author: mgudesblatart    | Project source | File source
def read_images (path, sz=None):
        c = 0
        X,y = [], []
        for dirname, dirnames, filenames in os.walk(path):
            for subdirname in dirnames:
                subject_path = os.path.join(dirname, subdirname)
                for filename in os.listdir(subject_path):
                    try:
                        if (filename == ".drectory"):
                            continue
                        filepath = os.path.join(subject_path, filename)
                        im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)

                        if (sz is not None):
                                im = cv2.resize(im, sz)
                        X.append(np.asarray(im, dtype=np.uint8))
                        y.append(c)
                    except IOError, (errno, strerror):
                            print "I/O error({0}): {1}".format(errno,strerror)
                    except:
                            print "Unexpected error:", sys.exec_info()[0]
                            raise
                c= c+1

        return [X,y]
Project: Kaggle-Carvana-Image-Masking-Challenge    Author: petrosgk    | Project source | File source
def valid_generator():
    while True:
        for start in range(0, len(ids_valid_split), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(ids_valid_split))
            ids_valid_batch = ids_valid_split[start:end]
            for id in ids_valid_batch.values:
                img = cv2.imread('input/train/{}.jpg'.format(id))
                img = cv2.resize(img, (input_size, input_size))
                mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                mask = cv2.resize(mask, (input_size, input_size))
                mask = np.expand_dims(mask, axis=2)
                x_batch.append(img)
                y_batch.append(mask)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
Project: kaggle_ndsb2017    Author: juliandewit    | Project source | File source
def load_cube_img(src_path, rows, cols, size):
    img = cv2.imread(src_path, cv2.IMREAD_GRAYSCALE)
    # assert rows * size == cube_img.shape[0]
    # assert cols * size == cube_img.shape[1]
    res = numpy.zeros((rows * cols, size, size))

    img_height = size
    img_width = size

    for row in range(rows):
        for col in range(cols):
            src_y = row * img_height
            src_x = col * img_width
            res[row * cols + col] = img[src_y:src_y + img_height, src_x:src_x + img_width]

    return res
Project: main    Author: templerobotics    | Project source | File source
def pos_images():

        #Edit this for new path of positive imges

        pos_path = '/path/folder'

        files = [f for f in listdir(pos_path) if isfile(join(pos_path,f)) ]

        #empty array with the size of the amount of files we have
        images = numpy.empty(len(files), dtype=object)

        pos_num = 1


        #cycle throw positives
        for n in range(0, len(files)):
          images[n] = cv2.imread( join(pos_path,files[n]),cv2.IMREAD_GRAYSCALE)
          img_resize = cv2.resize(images[n], (45, 45))
          cv2.imwrite("pos/"+str(pos_num)+".jpg",img_resize)
          pos_num+=1


    #Use to pull and resize negative images from image-net
Project: main    Author: templerobotics    | Project source | File source
def store_neg_images():
        neg_images_link = 'image-net url for negative images'   
        neg_image_urls = urllib.request.urlopen(neg_images_link).read().decode()
        #pic_num stands for picture index on the repo
        pic_num = 1

        if not os.path.exists('neg'):
            os.makedirs('neg')

        for i in neg_image_urls.split('\n'):
            try:
                print(i)
                urllib.request.urlretrieve(i, "neg/"+str(pic_num)+".jpg")
                neg_img = cv2.imread("neg/"+str(pic_num)+".jpg",cv2.IMREAD_GRAYSCALE)
                # should be larger than samples / pos pic (so we can place our image on it)
                neg_resize = cv2.resize(neg_img, (100, 100))
                cv2.imwrite("neg/"+str(pic_num)+".jpg",neg_resize)
                pic_num += 1

            except Exception as e:
                print(str(e))
Project: Robo-Plot    Author: JackBuck    | Project source | File source
def startDirectionTest(self, filename_without_extension):
        # Load image
        image = cv2.imread(os.path.join(self.path_to_test_data, filename_without_extension + '.jpg'), cv2.IMREAD_GRAYSCALE)
        print(os.path.join(self.path_to_test_data, filename_without_extension + '.jpg'))

        if image is None:
            raise TypeError

        # Start timer
        start_time = time.time()

        # Process image for start.
        start_direction = image_analysis.find_start_direction(image)

        # Print time taken.
        end_time = time.time()
        print('Time Taken for ' + filename_without_extension + ': ' + str(end_time - start_time))

        return start_direction
Project: Grand-Order-Reroller    Author: chaosking121    | Project source | File source
def identify_summons(image_path):
    import cv2
    import numpy as np

    image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    summons = []
    points = 0

    for file_name, (point_value, actual_name) in possible_summons.items():
        template = cv2.imread(os.path.join('screenshots', 'summons', file_name + '.png'), cv2.IMREAD_GRAYSCALE)

        res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= CLOSENESS_THRESHOLD)

        for pt in zip(*loc[::-1]):

            # Due to weird behaviour, only add one instance of each summon
            if actual_name in summons:
                continue
            summons.append(actual_name)
            points += point_value

    return (summons, points)
Project: Grand-Order-Reroller    Author: chaosking121    | Project source | File source
def image_is_on_screen(template_name):
    template = cv2.imread(os.path.join(
                                'screenshots', 
                                template_name + '.png'), 
                    cv2.IMREAD_GRAYSCALE)
    image = cv2.cvtColor(
                np.array(pyautogui.screenshot(
                        region=(0, 0, 1300, 750))), 
                cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= CLOSENESS_THRESHOLD)

    # Not sure why this works but okay
    for pt in zip(*loc[::-1]):
        return True

    return False
Project: async_face_recognition    Author: dpdornseifer    | Project source | File source
def _cascade_detect(self, raw_image):
        ''' use opencv cascades to recognize objects on the incoming images '''
        cascade = cv2.CascadeClassifier(self._cascade)
        image = np.asarray(bytearray(raw_image), dtype="uint8")

        gray_image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
        color_image = cv2.imdecode(image, cv2.IMREAD_ANYCOLOR)

        coordinates = cascade.detectMultiScale(
            gray_image,
            scaleFactor=1.15,
            minNeighbors=5,
            minSize=(30, 30)
        )

        for (x, y, w, h) in coordinates:
            cv2.rectangle(color_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            self._logger.debug("face recognized at: x: {}, y: {}, w: {}, h: {}".format(x, y, w, h))

        return color_image, self._tojson(coordinates)
Project: closed-form-matting    Author: MarcoForte    | Project source | File source
def test_solution_close_to_original_implementation(self):
        image = cv2.imread('testdata/source.png', cv2.IMREAD_COLOR) / 255.0
        scribles = cv2.imread('testdata/scribbles.png', cv2.IMREAD_COLOR) / 255.0

        alpha = closed_form_matting.closed_form_matting_with_scribbles(image, scribles)
        foreground, background = solve_foreground_background(image, alpha)

        matlab_alpha = cv2.imread('testdata/matlab_alpha.png', cv2.IMREAD_GRAYSCALE) / 255.0
        matlab_foreground = cv2.imread('testdata/matlab_foreground.png', cv2.IMREAD_COLOR) / 255.0
        matlab_background = cv2.imread('testdata/matlab_background.png', cv2.IMREAD_COLOR) / 255.0

        sad_alpha = np.mean(np.abs(alpha - matlab_alpha))
        sad_foreground = np.mean(np.abs(foreground - matlab_foreground))
        sad_background = np.mean(np.abs(background - matlab_background))

        self.assertLess(sad_alpha, 1e-2)
        self.assertLess(sad_foreground, 1e-2)
        self.assertLess(sad_background, 1e-2)
Project: crnn    Author: wulivicte    | Project source | File source
def checkImageIsValid(imageBin):
    if imageBin is None:
        return False
    try:
        imageBuf = np.fromstring(imageBin, dtype=np.uint8)
        img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
        imgH, imgW = img.shape[0], img.shape[1]
    except:
        return False
    else:
        if imgH * imgW == 0:
            return False        
    return True
Project: pybot    Author: spillai    | Project source | File source
def imread_process_cb(scale=1.0, grayscale=False):
        return lambda fn: im_resize(cv2.imread(fn, cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED), scale=scale)
Project: opencv-gui-helper-tool    Author: maunesh    | Project source | File source
def main():
    parser = argparse.ArgumentParser(description='Visualizes the line for hough transform.')
    parser.add_argument('filename')

    args = parser.parse_args()

    img = cv2.imread(args.filename, cv2.IMREAD_GRAYSCALE)

    cv2.imshow('input', img)

    edge_finder = EdgeFinder(img, filter_size=13, threshold1=28, threshold2=115)

    print "Edge parameters:"
    print "GaussianBlur Filter Size: %f" % edge_finder.filterSize()
    print "Threshold1: %f" % edge_finder.threshold1()
    print "Threshold2: %f" % edge_finder.threshold2()

    (head, tail) = os.path.split(args.filename)

    (root, ext) = os.path.splitext(tail)

    smoothed_filename = os.path.join("output_images", root + "-smoothed" + ext)
    edge_filename = os.path.join("output_images", root + "-edges" + ext)

    cv2.imwrite(smoothed_filename, edge_finder.smoothedImage())
    cv2.imwrite(edge_filename, edge_finder.edgeImage())

    cv2.destroyAllWindows()
Project: watermark    Author: lishuaijuly    | Project source | File source
def test_blindwm(alg,imgname,wmname,times=1):
    handle = script.dctwm

    if alg == 'DCT':
        handle  = script.dctwm
    if alg == 'DWT':
        handle  = script.dwtwm

    print('\n############## Testing ' + alg + ' blind watermark embedding and extraction')

    btime=time.time() 
    for i in range(times):
        img = cv2.imread('./data/'+imgname)
        wm  = cv2.imread('./data/'+wmname,cv2.IMREAD_GRAYSCALE)
        wmd = handle.embed(img,wm)
        outname = './output/'+alg+'_'+imgname

    cv2.imwrite(outname,wmd)
    print('Watermarked image written to {}, average embedding time {} ms, psnr: {}'.format(outname, int((time.time()-btime)*1000/times), psnr(img, wmd)))

    for  k,v in attack_list.items():
        wmd = attack(outname,k)
        cv2.imwrite('./output/attack/'+k+'_'+imgname,wmd)
        btime=time.time() 
        wm  = cv2.imread('./data/'+wmname,cv2.IMREAD_GRAYSCALE)
        sim = handle.extract(wmd,wm) 
        print('{:10} : extraction {} , similarity {} , took {} ms.'.format(v, 'passed' if sim > 0.7 else 'failed', sim, int((time.time()-btime)*1000)))
Project: DogvsCat    Author: aysebilgegunduz    | Project source | File source
def read_image(file_path):
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)  # cv2.IMREAD_GRAYSCALE
    if (img.shape[0] >= img.shape[1]):  # height is greater than width
        resizeto = (IMAGE_SIZE, int(round(IMAGE_SIZE * (float(img.shape[1]) / img.shape[0]))));
    else:
        resizeto = (int(round(IMAGE_SIZE * (float(img.shape[0]) / img.shape[1]))), IMAGE_SIZE);

    img2 = cv2.resize(img, (resizeto[1], resizeto[0]), interpolation=cv2.INTER_CUBIC)
    img3 = cv2.copyMakeBorder(img2, 0, IMAGE_SIZE - img2.shape[0], 0, IMAGE_SIZE - img2.shape[1], cv2.BORDER_CONSTANT,
                              0)

    return img3[:, :, ::-1]  # turn into rgb format
Project: TC-Lung_nodules_detection    Author: Shicoder    | Project source | File source
def load_patient_images(patient_id, base_dir=None, wildcard="*.*", exclude_wildcards=[]):
    if base_dir == None:
        base_dir = settings.LUNA_16_TRAIN_DIR
    src_dir = base_dir + patient_id + "/"
    src_img_paths = glob.glob(src_dir + wildcard)
    for exclude_wildcard in exclude_wildcards:
        exclude_img_paths = glob.glob(src_dir + exclude_wildcard)
        src_img_paths = [im for im in src_img_paths if im not in exclude_img_paths]
    src_img_paths.sort()
    images = [cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) for img_path in src_img_paths]
    images = [im.reshape((1, ) + im.shape) for im in images]
    res = numpy.vstack(images)
    return res
Project: handfontgen    Author: nixeneko    | Project source | File source
def main():
    testpic = cv2.imread('canvas.png', cv2.IMREAD_GRAYSCALE)
    bartype, bardata = passzbar(testpic)
    print(bardata.decode('utf-8'))
Project: handfontgen    Author: nixeneko    | Project source | File source
def main():
    testpic = cv2.imread('resources/marker_50.png', cv2.IMREAD_GRAYSCALE)
    print(passpotrace(testpic).decode('utf-8'))
Project: meterOCR    Author: DBMSRmutl    | Project source | File source
def readimage():
    image = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE) # reading image
    if image is None:
        print 'Can not find the image!'
        exit(-1)
    return image

#-------------------------------------------------------------------------------------
Project: meterOCR    Author: DBMSRmutl    | Project source | File source
def readimage(filename):
    print filename
    image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) # reading image
    if image is None:
        print 'Can not find the image!'
        sys.stdin.read(1)
        exit(-1)
    return image

#-------------------------------------------------------------------------------------
Project: bib-tagger    Author: KateRita    | Project source | File source
def test_swt(self):
        cv2.IMREAD_GRAYSCALE
        image = cv2.imread(os.path.join(self.photodir,"GloryDays","bib-sample.jpg"),cv2.IMREAD_GRAYSCALE)

        SWTImage = SWTScrubber.scrub(image)

        cv2.imwrite(os.path.join(self.photooutdir,"SWTImage.jpg"),SWTImage*255)
Project: ATX    Author: NetEaseGame    | Project source | File source
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s: continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.iteritems():
        for scene, tmpl in scenes.iteritems():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print name, scene, max_val, min_val
            cv2.imshow('found', img)
            cv2.waitKey()
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s: continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.iteritems():
        for scene, tmpl in scenes.iteritems():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print name, scene, max_val, min_val
            cv2.imshow('found', img)
            cv2.waitKey()
Project: miaomiaoji-tool    Author: ihciah    | Project source | File source
def print_image(url):
    global mutex
    img_file = requests.get(url)
    image = np.asarray(bytearray(img_file.content), dtype='uint8')
    im = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    pixels = ImageConverter.im2bmp(im)
    mutex.acquire()
    try:
        mmj = BtManager("69:68:63:69:61:68")
        if mmj.connected:
            stop = int(time.time()) + len(pixels) / 384 / 5
            mmj.sendImageToBt(pixels)
            mmj.disconnect()
            time_to_sleep = stop - int(time.time())
            time.sleep(time_to_sleep if time_to_sleep > 0 else 0)
    finally:
        mutex.release()
Project: CEAL-Medical-Image-Segmentation    Author: marc-gorriz    | Project source | File source
def create_train_data():
    """
    Generate training data numpy arrays and save them into the project path
    """

    image_rows = 420
    image_cols = 580

    images = os.listdir(data_path)
    masks = os.listdir(masks_path)
    total = len(images)

    imgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
    imgs_mask = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)

    for i, image_name in enumerate(images):
        img = cv2.imread(os.path.join(data_path, image_name), cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (image_rows, image_cols), interpolation=cv2.INTER_CUBIC)
        img = np.array([img])
        imgs[i] = img

    for i, image_mask_name in enumerate(masks):
        img_mask = cv2.imread(os.path.join(masks_path, image_mask_name), cv2.IMREAD_GRAYSCALE)
        img_mask = cv2.resize(img_mask, (image_rows, image_cols), interpolation=cv2.INTER_CUBIC)
        img_mask = np.array([img_mask])
        imgs_mask[i] = img_mask

    np.save('imgs_train.npy', imgs)
    np.save('imgs_mask_train.npy', imgs_mask)
Project: vse    Author: mkpaszkiewicz    | Project source | File source
def load_image(filename):
    """Reads an image from file. Image is being converted to grayscale and resized."""
    image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    if image is None:
        raise ImageLoaderError(filename)
    return convert_image(image)
Project: vse    Author: mkpaszkiewicz    | Project source | File source
def load_image_from_buf(buf):
    """Reads an image from a buffer in memory. Image is being converted to grayscale and resized."""
    if len(buf) == 0:
        raise ImageLoaderError()
    image = cv2.imdecode(numpy.frombuffer(buf, numpy.uint8), cv2.IMREAD_GRAYSCALE)
    if image is None:
        raise ImageLoaderError()
    return convert_image(image)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def _openImage(filename):
        return cv2.imread(filename, 
                        # cv2.IMREAD_ANYDEPTH | 
                        cv2.IMREAD_GRAYSCALE)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def imread(img, color=None, dtype=None):
    '''
    dtype = 'noUint', uint8, float, 'float', ...
    '''
    COLOR2CV = {'gray': cv2.IMREAD_GRAYSCALE,
                'all': cv2.IMREAD_COLOR,
                None: cv2.IMREAD_ANYCOLOR
                }
    c = COLOR2CV[color]
    if callable(img):
        img = img()
    elif isinstance(img, string_types):
        #         from_file = True
        #         try:
        #             ftype = img[img.find('.'):]
        #             img = READERS[ftype](img)[0]
        #         except KeyError:
        # open with openCV
        # grey - 8 bit
        if dtype in (None, "noUint") or np.dtype(dtype) != np.uint8:
            c |= cv2.IMREAD_ANYDEPTH
        img2 = cv2.imread(img, c)
        if img2 is None:
            raise IOError("image '%s' is not existing" % img)
        img = img2

    elif color == 'gray' and img.ndim == 3:  # multi channel img like rgb
        # cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #cannot handle float64
        img = toGray(img)
    # transform array to uint8 array due to openCV restriction
    if dtype is not None:
        if isinstance(img, np.ndarray):
            img = _changeArrayDType(img, dtype, cutHigh=False)

    return img
Project: dataArtist    Author: radjkarl    | Project source | File source
def open(self, filename):
        p = self.preferences
        # open in 8 bit?
        if p.p8bit.value():
            col = 0
        else:
            col = cv2.IMREAD_ANYDEPTH
        if p.pGrey.value() and not p.pSplitColors.value():
            col = col | cv2.IMREAD_GRAYSCALE
        else:
            col |= cv2.IMREAD_ANYCOLOR

        # OPEN
        img = cv2.imread(str(filename), col)  # cv2.IMREAD_UNCHANGED)
        if img is None:
            raise Exception("image '%s' doesn't exist" % filename)

        # crop
        if p.pCrop.value():
            r = (p.pCropX0.value(),
                 p.pCropX1.value(),
                 p.pCropY0.value(),
                 p.pCropY1.value())
            img = img[r[0]:r[1], r[2]:r[3]]

        # resize
        if p.pResize.value():
            img = cv2.resize(img, (p.pResizeX.value(), p.pResizeY.value()))

        labels = None
        if img.ndim == 3:
            if p.pSplitColors.value():
                img = np.transpose(img, axes=(2, 0, 1))
                labels = ['blue', 'green', 'red']
            else:
                # rgb convention
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # change data type to float
        img = self.toFloat(img)
        return img, labels
Project: Pytorch-Deeplab    Author: speedinghzl    | Project source | File source
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label

        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        #image = image[:, :, ::-1]  # change to BGR
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy(), np.array(size), name
Project: Pytorch-Deeplab    Author: speedinghzl    | Project source | File source
def __getitem__(self, index):
        datafiles = self.files[index]

        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]

        if self.scale:
            image, label = self.generate_scale_label(image, label)

        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape

        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)

        # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        #image = image[:, :, ::-1]  # change to BGR
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy(), np.array(size), name
Project: VAE_GAN    Author: takat0m0    | Project source | File source
def get_figs(dir_name):
    ret = []
    for file_name in os.listdir(dir_name):
        #tmp = cv2.imread(os.path.join(dir_name, file_name), cv2.IMREAD_GRAYSCALE)
        #tmp = np.reshape(tmp, (64, 64, 1))
        tmp = cv2.imread(os.path.join(dir_name, file_name))
        ret.append(tmp/127.5 - 1.0)
    return np.asarray(ret, dtype = np.float32)
Project: LoginSimulation    Author: Byshx    | Project source | File source
def _deal_image_(self):
        img = cv2.imread(self.totalpath, cv2.IMREAD_GRAYSCALE)
        img = self._remove_line_(img)
        result = self._split_word_(img)
        # if no characters could be segmented, regenerate the captcha image and try again
        if len(result) == 0:
            print 'No characters segmented, regenerating image...'
            while len(result) == 0:
                if self._generate_image_():
                    img, result = self._deal_image_()
            print 'Image regenerated successfully'
        return img, result
Project: LoginSimulation    Author: Byshx    | Project source | File source
def _get_data_(num, pic_dict=path):
    # containers for the image data and labels
    imgdata = []
    labeldata = []
    # number of samples to collect
    checkcount = 50
    nowcount = 0

    # exception class used to break out of the nested loops
    class Getoutofloop(Exception):
        pass

    try:
        while True:
            for root, dirs, files in os.walk(pic_dict):
                for dir in dirs:
                    for img in os.walk(os.path.join(root, dir)):
                        for imagename in img[2]:
                            if random.randint(0, 80) < 2:
                                image = cv2.imread(str(img[0]) + '/' + str(imagename), cv2.IMREAD_GRAYSCALE)
                                image = image.astype(np.float32)
                                image = np.multiply(image, 1.0 / 255.0)
                                imgdata.append(np.ravel(image))
                                tmplabel = img[0]
                                tmplabel = tmplabel[len(tmplabel) - 1]
                                labeldata.append(tmplabel)
                                nowcount += 1
                                if nowcount == checkcount:
                                    raise Getoutofloop()
                            else:
                                continue
    except Getoutofloop:
        pass
    imgdata = np.array(imgdata)
    labeldata = dc._one_hot_(np.array(labeldata))
    return imgdata, labeldata


# TensorFlow training code
Project: NNProject_DeepMask    Author: abbypa    | Project source | File source
def prepare_expected_mask(mask_path):
    im = cv2.resize(cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE), (output_mask_size, output_mask_size)).astype(np.float32)
    # replace visible color with 1 (actual mask)
    im[im > 0] = 1
    # 0 -> -1
    im[im == 0] = -1
    return im
Project: CNN-LSTM-CTC-text-recognition    Author: oyxhust    | Project source | File source
def __iter__(self):
        #print('iter')
        init_state_names = [x[0] for x in self.init_states]
        for k in range(self.count):
            data = []
            label = []
            for i in range(self.batch_size):
                img_name = self.image_set_index[i + k*self.batch_size]
                img = cv2.imread(os.path.join(self.data_path, img_name + '.jpg'), cv2.IMREAD_GRAYSCALE)
                #img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                img = cv2.resize(img, self.data_shape)
                img = img.reshape((1, data_shape[1], data_shape[0]))
                #print(img)
                #img = img.transpose(1, 0)
                #img = img.reshape((data_shape[0] * data_shape[1]))
                img = np.multiply(img, 1/255.0)
                #print(img)
                data.append(img)
                ret = np.zeros(self.num_label, int)
                plate_str = self.gt[int(img_name)]
                #print(plate_str)
                for number in range(len(plate_str)):
                    ret[number] = self.classes.index(plate_str[number]) + 1
                #print(ret)
                label.append(ret)

            data_all = [mx.nd.array(data)] + self.init_state_arrays
            label_all = [mx.nd.array(label)]
            data_names = ['data'] + init_state_names
            label_names = ['label']


            data_batch = SimpleBatch(data_names, data_all, label_names, label_all)
            yield data_batch