Python matplotlib.pyplot module: imread() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use matplotlib.pyplot.imread().
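Before the project examples, here is a minimal, self-contained sketch of typical plt.imread() usage (the file name sample.png is only a placeholder and is not taken from any project below). Note that matplotlib reads PNG files natively as float32 arrays with values in [0, 1], while most other formats are decoded through Pillow and typically come back as uint8.

import matplotlib.pyplot as plt
import numpy as np

img = plt.imread('sample.png')            # PNG loads as float32 in [0, 1]; 'sample.png' is a placeholder
print(img.shape, img.dtype)               # e.g. (H, W, 4) for an RGBA PNG

rgb = img[..., :3]                        # drop the alpha channel, a common idiom in the examples below
as_uint8 = (rgb * 255).astype(np.uint8)   # convert to 8-bit if an integer image is needed

plt.imshow(rgb)
plt.axis('off')
plt.show()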

Project: unblackboxing_webinar    Author: deepsense-ai
def resize_folder(folder,
                  extensions = ['jpg','jpeg','png'],
                  size = (100,100)):

    '''
        Resizes all images in the specified folder to the given size and overwrites them in place.
        Inputs:
            folder: string, full path to the folder with images
            extensions: list, list of valid extensions
            size: tuple, size of the output images
    '''

    img_paths = get_images_from_directory(folder,extensions)

    resized_imgs = [imresize(plt.imread(im_pth)[:,:,:3],size) for im_pth in img_paths]

    for p,r_im in zip(img_paths,resized_imgs):
        plt.imsave(p,r_im)
Project: pypiv    Author: jr7
def main():
    imgs = glob('images/real_ana_finger*')
    frames = [plt.imread(x) for x in imgs]

    frame_a = frames[0]
    frame_b = frames[1]

    piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
                            search_size=32, distance=16)
    u, v = piv.correlate_frames()

    adapt_piv = pypiv.AdaptivePIV(piv, window_size=32,
                                  search_size=32, distance=16,
                                  ipmethod='cubic')
    u, v = adapt_piv.correlate_frames()

    adapt_piv = pypiv.AdaptivePIV(piv, window_size=32,
                                  search_size=32, distance=8,
                                  ipmethod='cubic')
    u, v = adapt_piv.correlate_frames()

    plt.imshow(u)
    plt.clim(-5, 5)
    plt.show()
Project: traffic_detection_yolo2    Author: wAuner
def frames2video(path):
    """
    Merges images in path into a video

    :param path: path with prediction images
    :return: nothing
    """
    fnames = os.listdir(path)
    fnames.sort()
    images = np.array([plt.imread(os.path.join(path, fname)) for fname in fnames])
    # h, w, c = images[0].shape
    videowriter = imageio.get_writer('prediction_video.mp4', fps=25)

    for im in images:
        videowriter.append_data(im)
    videowriter.close()
Project: traffic_detection_yolo2    Author: wAuner
def frames2video(name, path):
    """
    Merges images in path into a video

    :param name: prefix for the output video file
    :param path: path with prediction images
    :return: nothing
    """
    batch_size = 100
    fnames = os.listdir(path)
    fnames.sort()


    #images = np.array([plt.imread(os.path.join(path, fname)) for fname in fnames])
    # h, w, c = images[0].shape
    videowriter = imageio.get_writer(name + '_video.mp4', fps=25)

    for fname in tqdm.tqdm(fnames):
        videowriter.append_data(plt.imread(os.path.join(path, fname)))
    videowriter.close()
Project: Moving-Least-Squares    Author: Jarvis73
def demo2(fun):
    ''' 
        Smiling Mona Lisa
    '''

    p = np.array([
        [186, 140], [295, 135], [208, 181], [261, 181], [184, 203], [304, 202], [213, 225], 
        [243, 225], [211, 244], [253, 244], [195, 254], [232, 281], [285, 252]
    ])
    q = np.array([
        [186, 140], [295, 135], [208, 181], [261, 181], [184, 203], [304, 202], [213, 225], 
        [243, 225], [207, 238], [261, 237], [199, 253], [232, 281], [279, 249]
    ])
    image = plt.imread(os.path.join(sys.path[0], "monalisa.jpg"))
    plt.subplot(121)
    plt.axis('off')
    plt.imshow(image)
    transformed_image = fun(image, p, q, alpha=1, density=1)
    plt.subplot(122)
    plt.axis('off')
    plt.imshow(transformed_image)
    plt.tight_layout(w_pad=1.0, h_pad=1.0)
    plt.show()
Project: poke_semantics    Author: apilaskowski
def imscatter(x, y, image, ax=None, zoom=1):
    if ax is None:
        ax = plt.gca()
    try:
        image = plt.imread(image)
    except TypeError:
        # Likely already an array...
        pass
    im = OffsetImage(image, zoom=zoom)
    x, y = np.atleast_1d(x, y)
    artists = []
    for x0, y0 in zip(x, y):
        ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False)
        artists.append(ax.add_artist(ab))
    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()
    return artists
Project: ddnn    Author: kunglab
def plot_positions(df, img_path, frame, cam):
    color_dict = {'car': '#fc8d59', 'bus': '#ffffbf', 'person': '#91cf60'}
    frame_pos = df[(df['frame'] == frame) & (df['cam'] == cam)]
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    im = plt.imread(img_path)
    ax.imshow(im)
    for i, f in frame_pos.iterrows():
        add_rect(ax, f['x'], f['y'], f['w'], f['h'], color=color_dict[f['class_name']], name=f['id'])


    legend_handles = []
    for k, v in color_dict.items():
        handle = patches.Patch(color=v, label=k)
        legend_handles.append(handle)

    plt.legend(loc=0, handles=legend_handles)
    plt.xlim((0, 360))
    plt.ylim((0, 288))
    plt.ylim(plt.ylim()[::-1])
    plt.tight_layout()
    plt.tick_params(axis='both', left='off', top='off', right='off',
                    bottom='off', labelleft='off', labeltop='off',
                    labelright='off', labelbottom='off')
    plt.show()
Project: qtim_ROP    Author: QTIM-Lab
def imscatter(x, y, image, ax=None, zoom=1):
    if ax is None:
        ax = plt.gca()
    try:
        image = plt.imread(image)
    except TypeError:
        # Likely already an array...
        pass
    im = OffsetImage(image, zoom=zoom)
    x, y = np.atleast_1d(x, y)
    artists = []
    for x0, y0 in zip(x, y):
        ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False)
        artists.append(ax.add_artist(ab))
    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()
    return artists
Project: unblackboxing_webinar    Author: deepsense-ai
def folder2tensor(folder,
                  extensions=['jpg','jpeg','png','BMP'],
                  paths=False,
                  shape=None
                 ):

    '''
        Reads a folder of images into a stacked tensor; optionally returns the file paths as well.
    '''

    img_paths = get_images_from_directory(folder,extensions)

    if shape:
        # resize here (resizing is not actually applied in this snippet)
        tensor_list = [img2tensor(plt.imread(im_pth)[:,:,:3])
                       for im_pth in img_paths]
    else:
        tensor_list = [img2tensor(plt.imread(im_pth)[:,:,:3]) for im_pth in img_paths]

    if paths:
        return img_paths,np.vstack(tensor_list)
    else:
        return np.vstack(tensor_list)
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad
def match_images(input_dir, dirs, files):
    split_counter = 0
    for i in range(0, len(files)):
        print (i)
        img1 = plt.imread(os.path.join(input_dir, files[i]))
        img1 = (img1 * 255).round().astype(np.uint8)
        img1 = imresize(img1, (64, 64))
        for j in range(i+1 , len(files)):
            if (files[j][:4] == files[i][:4]):
                name = "match"+str(files[i][:10]) + "_"+ str(files[j][:10]) + ".png"
                img2 = plt.imread(os.path.join(input_dir, files[j]))
                img2 = (img2 * 255).round().astype(np.uint8)
                img2 = imresize(img2, (64, 64))
                img = np.vstack((img1, img2))
                img = Image.fromarray(img)
                if(split_counter < 8000):
                    split_counter+=1
                    img.save(os.path.join(dirs[1], name))
                else:
                    img.save(os.path.join(dirs[0], name))
Project: Handwritten_recognition_tensorflow    Author: sanjanaramprasad
def run(self):
        with self.output().open('w') as out_file:
            print "matching"
            split_counter = 0
            for i in range(0, len(self.files_1)):
                print (i)
                img1 = plt.imread(os.path.join(config.input_dir, self.files_1[i]))
                img1 = (img1 * 255).round().astype(np.uint8)
                img1 = imresize(img1, (64, 64))
                for j in range(i + 1, len(self.files_1)):
                    if (self.files_1[j][:4] == self.files_1[i][:4]):
                        name = "match" + str(self.files_1[i][:10]) + "_" + str(self.files_1[j][:10]) + ".png"
                        img2 = plt.imread(os.path.join(config.input_dir, self.files_1[j]))
                        img2 = (img2 * 255).round().astype(np.uint8)
                        img2 = imresize(img2, (64, 64))
                        img = np.vstack((img1, img2))
                        img = Image.fromarray(img)
                        if (split_counter < 8000):
                            split_counter += 1
                            img.save(os.path.join(self.dirs_1[1], name))
                        else:
                            img.save(os.path.join(self.dirs_1[0], name))
            out_file.write("Status : done")
Project: jamespy_py3    Author: jskDr
def load_images(fold):
    """
    Example
    =======
    fold = "../data/Autoencoder_compression/images_clip"
    im_a = load_images(fold)
    """
    files = listdir_files(fold)

    im_l = []
    for f in files:
        im = plt.imread(os.path.join(fold, f))
        im_l.append(im)

    im_a = np.array(im_l)
    if im_a.ndim == 4:
        im_a = im_a[:, :, :, 0]

    return im_a
Project: Machine-Learning    Author: AlanLin2015
def clusterclubs(numiter=5):
    datList=[]
    for line in open(r'E:\Users\Alan Lin\Desktop\machinelearningdata\places.txt').readlines():
        linearr=line.split('\t')
        datList.append([float(linearr[4]),float(linearr[3])])
    datamat=mat(datList)
    mycentroids,clusterassing=biKmeans(datamat,numiter,distMeas=distslc)
    fig = plt.figure()
    rect=[0.1,0.1,0.8,0.8]
    scattermarkers=['s','o','^','8','p','d','v','h','>','<']
    axprops=dict(xticks=[],yticks=[])
    ax0=fig.add_axes(rect,label='ax1',frameon=False)
    imgP=plt.imread(r'E:\Users\Alan Lin\Desktop\machinelearningdata\Portland.png')
    ax0.imshow(imgP)
    ax1=fig.add_axes(rect,label='ax1',frameon=False)
    for i in range(numiter):
        ptsincurrcluster=datamat[nonzero(clusterassing[:,0].A==i)[0],:]
        markerstyle=scattermarkers[i%len(scattermarkers)]
        ax1.scatter(ptsincurrcluster[:,0].flatten().A[0],ptsincurrcluster[:,1].flatten().A[0],marker=markerstyle,s=90)
    ax1.scatter(mycentroids[:,0].flatten().A[0],mycentroids[:,1].flatten().A[0],marker='+',s=300)
    plt.show()
Project: nba_shot_charts    Author: Connor-R
def acquire_teamPic(season_id, city, tname, team_id, zoom, offset=(250,370)):
    from matplotlib import  offsetbox as osb
    import urllib

    search_name = city + ' ' + tname
    img_url = abb_list.get(search_name)[1]
    try:
        img_path = os.getcwd()+'/'+str(team_id)+'.png'
        team_pic = plt.imread(img_path)
    except IOError:
        try:
            pic = urllib.urlretrieve(img_url,str(team_id)+'.png')
            team_pic = plt.imread(pic[0])
        except (ValueError, IOError):
            img_path = os.getcwd()+'/nba_logo.png'
            team_pic = plt.imread(img_path)


    img = osb.OffsetImage(team_pic, zoom)
    img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
    return img


#teams_list generation
Project: nba_shot_charts    Author: Connor-R
def acquire_custom_pic(custom_img, offset=(250,370)):
    from matplotlib import offsetbox as osb
    import urllib

    if custom_img is not None:
        try:
            img_path = os.getcwd()+'/'+custom_img+'.png'
            player_pic = plt.imread(img_path)
        except IOError:
            img_path = os.getcwd()+'/chart_icon.png'
            player_pic = plt.imread(img_path)
    else:
        img_path = os.getcwd()+'/chart_icon.png'
        player_pic = plt.imread(img_path)

    img = osb.OffsetImage(player_pic)
    img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
    return img


# def gen_charts():
Project: nba_shot_charts    Author: Connor-R
def acquire_playerPic(player_id, zoom, offset=(250,370)):
    from matplotlib import  offsetbox as osb
    import urllib

    try:
        img_path = os.getcwd()+'/'+str(player_id)+'.png'
        player_pic = plt.imread(img_path)
    except (ValueError,IOError):
        try:
            pic = urllib.urlretrieve("http://stats.nba.com/media/players/230x185/"+str(player_id)+".png",str(player_id)+".png")
            player_pic = plt.imread(pic[0])
        except (ValueError, IOError):
            try:
                pic = urllib.urlretrieve("https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/"+str(player_id)+".png",str(player_id)+".png")
                player_pic = plt.imread(pic[0])
            except (ValueError, IOError):
                img_path = os.getcwd()+'/chart_icon.png'
                player_pic = plt.imread(img_path)      


    img = osb.OffsetImage(player_pic, zoom)
    img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
    return img
Project: pypiv    Author: jr7
def main():
    imgs = glob('images/finger*')
    frames = [plt.imread(x) for x in imgs]

    frame_a = frames[0]
    frame_b = frames[1]


    piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
                            search_size=32, distance=16)

    u, v = piv.correlate_frames()
    plt.imshow(u)
    plt.show()
Project: traffic_detection_yolo2    Author: wAuner
def resize_img(q):
    while not q.empty():
        fname = q.get()
        img = plt.imread('images_orig/'+fname)
        res = resize(img, (416,416,3))
        imsave('images_small/'+fname, res)
        q.task_done()
Project: traffic_detection_yolo2    Author: wAuner
def inference_batches(path, batch_size=20):
    fnames = natsorted(os.listdir(path))
    if len(fnames) % batch_size == 0:
        n_batches = len(fnames) / batch_size
    else:
        n_batches = len(fnames) // batch_size + 1

    for b_idx in range(int(n_batches)):
        images = np.array([plt.imread(path + fname) for
                           fname in fnames[b_idx * batch_size:b_idx * batch_size + batch_size]])
        names = np.array([fname for fname in fnames[b_idx * batch_size:b_idx * batch_size + batch_size]])

        yield images, names
Project: traffic_detection_yolo2    Author: wAuner
def inference_batches(path, batch_size=20):
    fnames = os.listdir(path)
    if len(fnames) % batch_size == 0:
        n_batches = len(fnames) / batch_size
    else:
        n_batches = len(fnames) // batch_size + 1

    for b_idx in range(int(n_batches)):
        images = np.array([plt.imread(path + fname) for
                           fname in fnames[b_idx * batch_size:b_idx * batch_size + batch_size]])
        names = np.array([fname for fname in fnames[b_idx * batch_size:b_idx * batch_size + batch_size]])

        yield images, names
Project: traffic_detection_yolo2    Author: wAuner
def plot_boxes(df):
    """
    Takes in a dataframe, selects 9 random rows and plots images
    with bounding boxes

    :param df: dataframe with one bounding-box annotation per row
    :return: nothing
    """

    fig, axs = plt.subplots(3, 3, figsize=(15, 15))

    # load random images from frame and plot them with bboxes
    random_idxs = np.random.choice(np.arange(len(df)), size=9, replace=False)
    # print(random_idxs)
    for idx, ax in enumerate(axs.flatten()):
        ridx = random_idxs[idx]
        img = plt.imread('image_data/' + df.iloc[ridx]['Frame'])
        img = cv2.resize(img, (608, 608))

        # find all objects in random image
        sub_df = df[df['Frame'] == df.iloc[ridx]['Frame']]

        for objct in range(sub_df.shape[0]):
            pt1, pt2 = convert_bbox_format(sub_df.iloc[objct])
            cv2.rectangle(img, pt1, pt2, (255, 0, 0), thickness=2)
        ax.imshow(img)
        ax.axis('off')
Project: OpenSAPM    Author: pathfinder14
def _parse_picture(self, image_path):
        image_file = open(image_path, 'rb')
        image = plt.imread(image_file)
        return image

    # Displays the parsed picture on the plot
Project: carnd-behavioral-cloning    Author: nikidimi
def save_image_data(paths, labels):
    """
    This functions reads lists in the DataPreparator format, opens an image, 
    applies transformations and saves them in numpy binary format

    Parameters
    ----------
    paths : list
        Entries in the DataPreparator format: (image path, flip flag, preprocessing parameter)
    labels : list
        Target labels, one per entry in `paths`

    """
    # Output data format
    X_train = np.empty([len(paths), 16, 32, 1])
    y_train = np.empty([len(paths)])

    # Go over all images
    for index in range(0, len(paths)):
        image = plt.imread(paths[index][0])        

        # Apply the needed transformations    
        arr = image_preprocess(image, paths[index][2])
        if (paths[index][1]):
            arr = cv2.flip(arr, 1)

        # Store the data for output, reshaping it from 16X32 to 16X32X1
        X_train[index] = arr.reshape((16, 32, 1))
        y_train[index] = labels[index]

    # Save the data in numpy binary format. 
    # This speeds up training because we need to do image parsing only once, not before every training
    np.save("x.data", X_train)
    np.save("y.data", y_train)
Project: Moving-Least-Squares    Author: Jarvis73
def show_example():
    img = plt.imread(os.path.join(sys.path[0], "double.jpg"))
    plt.imshow(img)
    plt.show()
Project: Moving-Least-Squares    Author: Jarvis73
def demo(fun, fun_inv, name):
    p = np.array([
        [30, 155], [125, 155], [225, 155],
        [100, 235], [160, 235], [85, 295], [180, 293]
    ])
    q = np.array([
        [42, 211], [125, 155], [235, 100],
        [80, 235], [140, 235], [85, 295], [180, 295]
    ])
    image = plt.imread(os.path.join(sys.path[0], "mr_big_ori.jpg"))

    plt.figure(figsize=(8, 6))
    plt.subplot(231)
    plt.axis('off')
    plt.imshow(image)
    plt.title("Original Image")
    if fun is not None:
        transformed_image = fun(image, p, q, alpha=1, density=1)
        plt.subplot(232)
        plt.axis('off')
        plt.imshow(transformed_image)
        plt.title("%s Deformation \n Sampling density 1"%name)
        transformed_image = fun(image, p, q, alpha=1, density=0.7)
        plt.subplot(235)
        plt.axis('off')
        plt.imshow(transformed_image)
        plt.title("%s Deformation \n Sampling density 0.7"%name)
    if fun_inv is not None:
        transformed_image = fun_inv(image, p, q, alpha=1, density=1)
        plt.subplot(233)
        plt.axis('off')
        plt.imshow(transformed_image)
        plt.title("Inverse %s Deformation \n Sampling density 1"%name)
        transformed_image = fun_inv(image, p, q, alpha=1, density=0.7)
        plt.subplot(236)
        plt.axis('off')
        plt.imshow(transformed_image)
        plt.title("Inverse %s Deformation \n Sampling density  0.7"%name)

    plt.tight_layout(w_pad=0.1)
    plt.show()
Project: brainpipe    Author: EtienneCmb
def load(self, name):
        return plt.imread(join(self._path, name))
Project: SDCND_Behavioral_Cloning    Author: andrewraharjo
def load_image(data_line, j):
    img = plt.imread(data_line[j].strip())[65:135:4,0:-1:4,0]
    lis = img.flatten().tolist()
    return lis
Project: cartpoleplusplus    Author: matpalm
def png_to_rgb(png_bytes):
  """convert png (from rgb_to_png) to RGB"""
  # note PNG is always RGBA so we need to slice off A
  rgba = plt.imread(StringIO.StringIO(png_bytes))
  return rgba[:,:,:3]
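The snippet above is Python 2 code (StringIO.StringIO); a Python 3 equivalent would wrap the raw bytes in io.BytesIO instead, for example (a sketch, not part of the original project):

import io
import matplotlib.pyplot as plt

def png_to_rgb_py3(png_bytes):
    """Python 3 sketch of png_to_rgb: decode in-memory PNG bytes and slice off the alpha channel."""
    rgba = plt.imread(io.BytesIO(png_bytes))
    return rgba[:, :, :3]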
Project: e2c-pytorch    Author: ethanluoyc
def __init__(self, root, split):
        if split not in ['train', 'test', 'all']:
            raise ValueError

        dir = os.path.join(root, split)
        filenames = glob.glob(os.path.join(dir, '*.png'))

        if split == 'all':
            filenames = glob.glob(os.path.join(root, 'train/*.png'))
            filenames.extend(glob.glob(os.path.join(root, 'test/*.png')))

        filenames = sorted(
            filenames, key=lambda x: int(os.path.basename(x).split('.')[0]))

        images = []

        for f in filenames:
            img = plt.imread(f)
            img[img != 1] = 0
            images.append(resize(rgb2gray(img), [48, 48], mode='constant'))

        self.images = np.array(images, dtype=np.float32)
        self.images = self.images.reshape([len(images), 48, 48, 1])

        action_filename = os.path.join(root, 'actions.txt')

        with open(action_filename) as infile:
            actions = np.array([float(l) for l in infile.readlines()])

        self.actions = actions[:len(self.images)].astype(np.float32)
        self.actions = self.actions.reshape(len(actions), 1)
Project: e2c-pytorch    Author: ethanluoyc
def __init__(self, fname, env_file):
        super(PlaneData, self).__init__()
        self.cache = fname
        self.initialized = False
        self.im = plt.imread(os.path.join(os.path.dirname(__file__), env_file))  # grayscale
        self.params = (x_dim, u_dim, T)
Project: poke_semantics    Author: apilaskowski
def main():
    x = np.linspace(0, 10, 20)
    y = np.cos(x)
    image_path = mpimg.imread('sprites/1.png')
    fig, ax = plt.subplots()
    imscatter(x, y, image_path,  ax=ax)
    ax.plot(x, y)
    plt.show()
Project: My-TensorFlow-tutorials    Author: kevin28520
def show_feature_map():
    cat = plt.imread('cat.jpg') #uint8
    plt.imshow(cat)
    cat = tf.cast(cat, tf.float32) #[360, 300, 3]
    x = tf.reshape(cat, [1, 360, 300,3]) #[1, 360, 300, 3]

    out = 25

    with tf.variable_scope('conv1'):
        w = tools.weight([3,3,3,out], is_uniform=True)
        x_w = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
        b = tools.bias([out])
        x_b = tf.nn.bias_add(x_w, b)        
        x_relu = tf.nn.relu(x_b)

    n_feature = int(x_w.get_shape()[-1])
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    feature_map = tf.reshape(x_w, [360,300,out])
    images = tf.image.convert_image_dtype (feature_map, dtype=tf.uint8)
    images = sess.run(images)

    plt.figure(figsize=(10, 10))
    for i in np.arange(0, n_feature):
        plt.subplot(5, 5, i + 1)
        plt.axis('off')
        plt.imshow(images[:,:,i])
    plt.show()
Project: My-TensorFlow-tutorials    Author: kevin28520
def show_rich_feature():
    cat = plt.imread('cat.jpg') #uint8
    plt.imshow(cat)
    cat = tf.cast(cat, tf.float32) #[360, 300, 3]
    x = tf.reshape(cat, [1, 360, 300,3]) #[1, 360, 300, 3]

    with tf.variable_scope('conv1_1', reuse=True):
        w1 = tf.get_variable('weights', (3,3,3,64))
        b1 = tf.get_variable('biases', (64))

        x_w = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='SAME')
        x_b = tf.nn.bias_add(x_w, b1)        
        x_relu = tf.nn.relu(x_b)

        out = 64

        n_feature = int(x_w.get_shape()[-1])
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        feature_map = tf.reshape(x_relu, [360,300,out])
        images = tf.image.convert_image_dtype (feature_map, dtype=tf.uint8)
        images = sess.run(images)

        plt.figure(figsize=(10, 10))
        for i in np.arange(0, 25):
            plt.subplot(5, 5, i + 1)
            plt.axis('off')
            plt.imshow(images[:,:,i])
        plt.show()
#%%
Project: behavioral-cloning    Author: dmonn
def process_img(img):
    """
    Load image and crop
    """
    img = "{}/{}".format(DATA_PATH, img)
    img = plt.imread(img)[60:135, : ]

    if DEBUGING_FLAG:
        # Show image if Debug Flag is enabled
        plt.imshow(img)
        plt.show()
        sys.exit("Ending preprocessing here; not done.")

    return img
Project: pyglitch    Author: giofusco
def open_image(filename):
    """loads the image
        :param filename: path to the image
    """
    I = plt.imread(filename, format=None)
    if I.dtype == np.float32:
        I *= 255
    I = I.astype(np.uint8)
    return I.copy()
Project: eclipse2017    Author: google
def draw_photo_pin(data_directory, ax, map, photo_lon, photo_lat):
    x_size, y_size = 1.3, 1.3*1.78
    x0, y0 = map(photo_lon - x_size/2., photo_lat - y_size/2.)
    x1, y1 = map(photo_lon + x_size/2., photo_lat + y_size/2.)

    im = plt.imread(os.path.join(data_directory, 'Map_pin.png'))
    plt.imshow(im, zorder=3, extent=(x0, x1, y0, y1))
Project: PyDLSSVM    Author: djosix
def load_dataset(name):
    path = "datasets/" + name.strip("/") + "/"
    img_path = path + "img/"
    truth_path = path + "groundtruth_rect.txt"

    truths = []
    with open(truth_path, "rt") as f:
        for line in f.readlines():
            truths += [np.array(line.strip().split(","), dtype=int)]

    temp_path = "/tmp/" + path.replace("/", "_") + ".pkl"
    if not os.path.isfile(temp_path):
        print("Loading dataset")
        files = os.listdir(img_path)
        files.sort()
        frames = []
        for f in files:
            print("Loading %s" % (img_path + f))
            frames += [plt.imread(img_path + f)]
        with open(temp_path, "wb") as f:
            print("Saving dataset to %s" % temp_path)
            pickle.dump(frames, f)
    else:
        with open(temp_path, "rb") as f:
            print("Loading dataset %s" % temp_path)
            frames = pickle.load(f)

    return (frames, truths)
Project: gm-cml    Author: wangyida
def get_myown_imgs(direc):
    scan=ScanFile(direc)
    files_img=scan.scan_files()
    return [plt.imread(f_i) for f_i in files_img]

# Write a function to preprocess/normalize an image, given its dataset object
# (which stores the mean and standard deviation!)
Project: gm-cml    Author: wangyida
def get_myown_imgs(direc):
    scan=ScanFile(direc)
    files_img=scan.scan_files()
    return [plt.imread(f_i) for f_i in files_img]

# Write a function to preprocess/normalize an image, given its dataset object
# (which stores the mean and standard deviation!)
Project: gm-cml    Author: wangyida
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.

    Returns
    -------
    imgs : list of np.ndarray
        List of the first 100 images from the celeb dataset
    """
    return [plt.imread(f_i) for f_i in get_celeb_files(max_images=max_images)]
Project: matplotlib-colorbar    Author: ppinard
def test_colorbar_example1():
    with cbook.get_sample_data('grace_hopper.png') as fp:
        data = np.array(plt.imread(fp))

    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot("111", aspect='equal')
    mappable = ax.imshow(data[..., 0], cmap='viridis')
    colorbar = Colorbar(mappable, location='lower left')
    colorbar.set_ticks([0.0, 0.5, 1.0])
    ax.add_artist(colorbar)
Project: matplotlib-colorbar    Author: ppinard
def test_colorbar_example2():
    with cbook.get_sample_data('grace_hopper.png') as fp:
        data = np.array(plt.imread(fp))

    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot("111", aspect='equal')
    norm = matplotlib.colors.Normalize(vmin=-1.0, vmax=1.0)
    mappable = ax.imshow(data[..., 0], cmap='viridis', norm=norm)
    colorbar = Colorbar(mappable, location='lower left')
    colorbar.set_ticks([-1.0, 0, 1.0])
    ax.add_artist(colorbar)
Project: TF-FaceLandmarkDetection    Author: mariolew
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.

    Returns
    -------
    imgs : list of np.ndarray
        List of the first 100 images from the celeb dataset
    """
    return [plt.imread(f_i) for f_i in get_celeb_files(max_images=max_images)]
Project: e2c    Author: ericjang
def __init__(self, fname, env_file):
    super(PlaneData, self).__init__()
    self.cache=fname
    self.initialized=False
    self.im=plt.imread(env_file) # grayscale
    self.params=(x_dim,u_dim,T)
Project: social-lstm-tf    Author: vvanirudh
def convert_to_obstacle_map(img):
    '''
    Function to create an obstacle map from the annotated image
    params:
    img : Image file path
    '''
    im = plt.imread(img)
    # im is a numpy array of shape (w, h, 4)
    w = im.shape[0]
    h = im.shape[1]

    obs_map = np.ones((w, h))

    for i in range(w):
        for j in range(h):
            # rgba is a 4-dimensional vector
            rgba = im[i, j]
            # obstacle
            if rgba[0] == 0 and rgba[1] == 0 and rgba[2] == 0:
                # print "Obstacle found"
                obs_map[i, j] = 0
            # Partially traversable
            elif rgba[0] == 0 and rgba[1] == 0:
                # print "Partially traversable found"
                obs_map[i, j] = 0.5

    return obs_map
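The nested pixel loop above can be slow on large annotation images. The same mapping can be written with NumPy boolean masks, as in this sketch (assuming, like the loop, that obstacles are pure black and partially traversable pixels have zero red and green):

import numpy as np
import matplotlib.pyplot as plt

def convert_to_obstacle_map_vectorized(img):
    """Vectorized variant of the loop above: black pixels -> 0, pixels with R = G = 0 -> 0.5."""
    im = plt.imread(img)
    obs_map = np.ones(im.shape[:2])
    partially = (im[:, :, 0] == 0) & (im[:, :, 1] == 0)  # zero red and green
    obstacle = partially & (im[:, :, 2] == 0)            # ... and zero blue
    obs_map[partially] = 0.5
    obs_map[obstacle] = 0.0
    return obs_map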
Project: ecogdeep    Author: nancywang1991
def imscatter(x, y, image, ax=None, color=None, days=None):
    """Scatter image at x, y on scatter graph

    Args:
        x (int): x location of data point
        y (int): y location of data point
        image: PIL image to be displayed
        ax: scatterplot handle
        color (r,g,b,a): if not None, border color
        days (list of int): if not None, select color based on time of datapoint and days contains
                            the days present in dataset
    Returns:
        artists (list of axis artists)
    """
    if ax is None:
        ax = plt.gca()
    try:
        image = plt.imread(image)
    except TypeError:
        # Likely already an array...
        pass

    x, y = np.atleast_1d(x, y)
    artists = []
    cmap = matplotlib.cm.get_cmap('nipy_spectral')
    for x0, y0, im0 in zip(x, y, image):
        if days:
            # Assumes around 700 videos per day
            color = cmap((days.index(int(im0.split("/")[-1].split("_")[1]))*700+int(im0.split("/")[-1].split("_")[2]))/((len(days))*700.0))
        if os.path.exists(im0):
            im = load_img_seq(im0, resize_size=(1, 1), color=color)
            im = OffsetImage(im, zoom=2)
            ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False)
            artists.append(ax.add_artist(ab))
    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()
    return artists
Project: unblackboxing_webinar    Author: deepsense-ai
def folder2tensor(folder,
                  extensions = ['jpg','jpeg','png'],
                  paths = False,
                  img_shape=(224,224)
                 ):

    '''
        Reads and transforms a folder of images into a tensor for Keras models
        Inputs:
            folder: string, filepath to folder
            extensions: list, list of valid extensions
            paths: boolean, whether file paths should be returned along with the tensor
            img_shape: tuple, target image shape passed to img2tensor
        Outputs:
            tensor built from the folder images, or a tuple of (filepaths, tensor)
    '''

    img_paths = get_images_from_directory(folder,extensions)

    tensor_list = [img2tensor(plt.imread(im_pth)[:,:,:3], img_shape) 
                   for im_pth in img_paths]

    if paths:
        return img_paths,np.vstack(tensor_list)
    else:
        return np.vstack(tensor_list)
Project: unblackboxing_webinar    Author: deepsense-ai
def plot_folder(folder,extensions = ['jpg','jpeg','png','bmp'],**kwargs):
    '''
        Plots all the images from a specified folder with specified extensions

        Inputs:
            folder: string, path to folder with images
            extensions:list, list of valid extensions
    '''

    filepaths = get_images_from_directory(dir_path = folder,extensions = extensions)
    img_list = [plt.imread(f) for f in filepaths]
    plot_list(img_list,**kwargs)
Project: unblackboxing_webinar    Author: deepsense-ai
def browse(self, figsize=(16,10), labels=None):        
        def plot(layer_id, filter_id):
            filepath = '{}/{}/{}/img.jpg'.format(self.save_dir_, 
                                                 layer_id, filter_id)            
            img = plt.imread(filepath)
            plt.figure(figsize=figsize)
            if labels:
                plt.title('Label: {}'.format(labels[int(filter_id)]))
            plt.imshow(img)
            plt.show()
        return interact(plot, layer_id='1',filter_id='0')
Project: hellish    Author: unlimblue
def load_image(image_path):
    if "http" in image_path:
        img = io.imread(urllib.parse.unquote(image_path))
    else:
        img = plt.imread(image_path)
    return img