Python matplotlib.pyplot module: imshow() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use matplotlib.pyplot.imshow().
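
Before the project examples, here is a minimal, self-contained sketch of typical plt.imshow() usage; the array and parameter choices are illustrative only and are not taken from any of the projects below.

import numpy as np
import matplotlib.pyplot as plt

# Build a small synthetic 2-D array to display as an image.
data = np.random.rand(64, 64)

plt.figure(figsize=(4, 4))
# imshow renders a 2-D array (or an RGB(A) array) as a raster image;
# cmap only applies to 2-D data, interpolation controls resampling.
plt.imshow(data, cmap='gray', interpolation='nearest')
plt.colorbar()
plt.axis('off')
plt.show()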

Project: lyricswordcloud    Author: qwertyyb
def showData(self):
    print('Generating word cloud, please wait...')
    mask = imread(self.picfile)
    imgcolor = ImageColorGenerator(mask)
    wcc = WordCloud(font_path='./msyhl.ttc',
                    mask=mask, background_color='white',
                    max_font_size=200,
                    max_words=300,
                    color_func=imgcolor)
    wc = wcc.generate_from_frequencies(self.data)
    plt.figure()
    plt.imshow(wc)
    plt.axis('off')
    print('Word cloud generated.')
    plt.show()
Project: torch_light    Author: ne7ermore
def imshow(tensor, imsize=512, title=None):
    image = tensor.clone().cpu()
    image = image.view(*tensor.size())
    image = transforms.ToPILImage()(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(5)
Project: SelfDrivingCar    Author: aguijarro
def main():
    # reading in an image
    #image = (mpimg.imread('test_images/solidWhiteRight.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidWhiteCurve.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidYellowCurve.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidYellowCurve2.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidYellowLeft.jpg') * 255).astype('uint8')
    image = (mpimg.imread('test_images/whiteCarLaneSwitch.jpg') * 255).astype('uint8')
    processImage = process_image(image)
    plt.imshow(processImage)
    plt.show()

    # Make video
    white_output = 'white.mp4'
    clip1 = VideoFileClip("solidWhiteRight.mp4")
    white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)

    # Make video
    yellow_output = 'yellow.mp4'
    clip2 = VideoFileClip('solidYellowLeft.mp4')
    yellow_clip = clip2.fl_image(process_image)
    yellow_clip.write_videofile(yellow_output, audio=False)
Project: dpl    Author: ppengtang
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: PyIntroduction    Author: tody411
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if capture.isOpened() is False:
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if ret == False:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Display webcam video with Matplotlib
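
The Matplotlib-based capture function that this comment introduces is not included in the snippet above. The following is a rough sketch of how such a display loop is commonly written; the function name, frame limit, and parameters are assumptions, not the PyIntroduction project's actual code.

def matplotCaptureVideo(num_frames=100):
    import cv2
    import matplotlib.pyplot as plt

    capture = cv2.VideoCapture(0)
    plt.ion()  # interactive mode so the figure refreshes inside the loop

    for _ in range(num_frames):
        ret, image = capture.read()
        if not ret:
            break
        # OpenCV delivers frames as BGR; Matplotlib expects RGB
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        plt.pause(0.01)  # short pause lets the GUI event loop draw the frame
        plt.clf()

    capture.release()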
Project: py-faster-rcnn-tk1    Author: joeking11829
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: pytorch-semseg    Author: meetshah1995
def decode_segmap(self, temp, plot=False):
        label_colours = self.get_pascal_labels()
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = label_colours[l, 0]
            g[temp == l] = label_colours[l, 1]
            b[temp == l] = label_colours[l, 2]

        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb
Project: pytorch-semseg    Author: meetshah1995
def decode_segmap(self, temp, plot=False):
        # TODO:(@meetshah1995)
        # Verify that the color mapping is 1-to-1
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = 10 * (l%10)
            g[temp == l] = l
            b[temp == l] = 0

        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = (r/255.0)
        rgb[:, :, 1] = (g/255.0)
        rgb[:, :, 2] = (b/255.0)
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb
Project: kaggle_dsb2017    Author: astoc
def get_masks(scans,masks_list):
    #%matplotlib inline
    scans1=scans.copy()
    maxv=255
    masks=np.zeros(shape=(scans.shape[0],1,img_rows,img_cols))
    for i_m in range(len(masks_list)):
        for i in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            for j in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
                masks[masks_list[i_m][0],0,masks_list[i_m][2]+i,masks_list[i_m][1]+j]=1
        for i1 in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]+masks_list[i_m][3]]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]-masks_list[i_m][3]]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]-masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
    for i in range(scans.shape[0]):
        print ('scan '+str(i))
        f, ax = plt.subplots(1, 2,figsize=(10,5))
        ax[0].imshow(scans1[i,0,:,:],cmap=plt.cm.gray)
        ax[1].imshow(masks[i,0,:,:],cmap=plt.cm.gray)
        plt.show()
    return(masks)
Project: human-rl    Author: gsastry
def test_penalty_env(env):
    import envs
    env = envs.create_env("Pong", location="bottom", catastrophe_type="1", 
                          classifier_file=save_classifier_path + '/0/final.ckpt')

    import matplotlib.pyplot as plt

    observation = env.reset()

    for _ in range(20):
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        plt.imshow(observation[:,:,0])
        plt.show()
        print('Cat: ', info['frame/is_catastrophe'])
        print('reward: ', reward)
        if done:
            break
Project: seq2seq    Author: google
def _create_figure(predictions_dict):
  """Creates and returns a new figure that visualizes
  attention scores for a single model's predictions.
  """

  # Find out how long the predicted sequence is
  target_words = list(predictions_dict["predicted_tokens"])

  prediction_len = _get_prediction_length(predictions_dict)

  # Get source words
  source_len = predictions_dict["features.source_len"]
  source_words = predictions_dict["features.source_tokens"][:source_len]

  # Plot
  fig = plt.figure(figsize=(8, 8))
  plt.imshow(
      X=predictions_dict["attention_scores"][:prediction_len, :source_len],
      interpolation="nearest",
      cmap=plt.cm.Blues)
  plt.xticks(np.arange(source_len), source_words, rotation=45)
  plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
  fig.tight_layout()

  return fig
Project: Google-QuickDraw    Author: ankonzoid
def plot_labeled_images_random(image_list, label_list, categories, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2*n, 2))
    #plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.set_title(categories[label_list[ind]], fontsize=20)
        ax.get_xaxis().set_visible(False); ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_unlabeled_images_random: plots unlabeled images at random
Project: Google-QuickDraw    Author: ankonzoid
def plot_unlabeled_images_random(image_list, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2*n, 2))
    plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.get_xaxis().set_visible(False); ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_compare: given test images and their reconstruction, we plot them for visual comparison
Project: Google-QuickDraw    Author: ankonzoid
def plot_compare(x_test, decoded_imgs, filename):
    n = 10
    plt.figure(figsize=(2*n, 4))
    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_img: plots greyscale image
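
The plot_img helper described by this comment is cut off in the excerpt above. Below is a minimal sketch of what such a greyscale plot typically looks like; the signature and the filename handling are assumptions, not the Google-QuickDraw project's actual code.

import matplotlib.pyplot as plt

def plot_img(img, filename=None):
    plt.figure()
    plt.imshow(img, cmap='gray')  # render a 2-D array as a greyscale image
    plt.axis('off')
    if filename:
        plt.savefig(filename, bbox_inches='tight')
    else:
        plt.show()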
Project: vae-npvc    Author: JeremyCCHsu
def plot_spectra(results):
    plt.figure(figsize=(10, 4))
    plt.imshow(
        np.concatenate(
            [np.flipud(results['x'].T),
             np.flipud(results['xh'].T),
             np.flipud(results['x_conv'].T)],
            0),
        aspect='auto',
        cmap='jet',
    )
    plt.colorbar()
    plt.title('Upper: Real input; Mid: Reconstruction; Lower: Conversion to target.')
    plt.savefig(
        os.path.join(
            args.logdir,
            '{}.png'.format(
                os.path.split(str(results['f'], 'utf-8'))[-1]
            )
        )
    )
Project: pybot    Author: spillai
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    f = plt.figure(1)
    f.clf()

    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
Project: pybot    Author: spillai
def plot_confusion_matrix(cm, clf_target_names, title='Confusion matrix', cmap=plt.cm.jet):
    target_names = map(lambda key: key.replace('_','-'), clf_target_names)

    for idx in range(len(cm)): 
        cm[idx,:] = (cm[idx,:] * 100.0 / np.sum(cm[idx,:])).astype(np.int)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.matshow(cm)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(clf_target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    # plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Project: pybot    Author: spillai
def imshow_cv(label, im, block=False, text=None, wait=2): 
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True: 
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'): 
                print('Exiting debug mode!')
                break
    if ch == ord('s'):
        fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
        print('Saving %s' % fn)
        cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1)
Project: shenlan    Author: vector-1127
def plotGeneratedImages(epoch,example=100,dim=(10,10),figsize=(10,10)):
    noise = np.random.normal(0,1,size=(example,randomDim))
    generatedImage = generator.predict(noise)
    generatedImage = generatedImage.reshape(example,28,28)

    plt.figure(figsize=figsize)

    for i in range(example):
        plt.subplot(dim[0],dim[1],i+1)
        plt.imshow(generatedImage[i],interpolation='nearest',cmap='gray')
        '''drop the x and y axis'''
        plt.axis('off')
    plt.tight_layout()

    if not os.path.exists('generated_image'):
        os.mkdir('generated_image')
    plt.savefig('generated_image/wgan_generated_img_epoch_%d.png' % epoch)
Project: kaggle-review    Author: daxiongshu
def show_one_img_mask(data):
    w,h = 1918,1280
    a = randint(0,31)
    path = "../input/test"
    data = np.load(data).item()
    name,masks = data['name'][a],data['pred']
    img = Image.open("%s/%s"%(path,name))
    #img.show()
    plt.imshow(img)
    plt.show()
    mask = np.squeeze(masks[a])
    mask = imresize(mask,[h,w]).astype(np.float32)
    print(mask.shape,mask[0])
    img = Image.fromarray(mask*256)#.resize([w,h])
    plt.imshow(img)
    plt.show()
Project: Flavor-Network    Author: lingcheng99
def plot_confusion_matrix(cm, col, title, cmap=plt.cm.viridis):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    for i in range(cm.shape[0]):
        plt.annotate("%.2f" %cm[i][i],xy=(i,i),
                    horizontalalignment='center',
                    verticalalignment='center')
    plt.title(title,fontsize=18)
    plt.colorbar(fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(col.unique()))
    plt.xticks(tick_marks, sorted(col.unique()),rotation=90)
    plt.yticks(tick_marks, sorted(col.unique()))
    plt.tight_layout()
    plt.ylabel('True label',fontsize=18)
    plt.xlabel('Predicted label',fontsize=18)

#using flavor network to project recipes from ingredient matrix to flavor matrix
Project: SourceFilterContoursMelody    Author: juanjobosch
def imageM(*args,**kwargs):
    """
    imageM(*args, **kwargs)

    This function essentially is a wrapper for the
    matplotlib.pyplot function imshow, such that the actual result
    looks like the default that can be obtained with the MATLAB
    function image.

    The arguments are the same as the arguments for function imshow.
    """
    # The appearance of the image: nearest means that the image
    # is not smoothed:
    kwargs['interpolation'] = 'nearest'
    # keyword 'aspect' allows to adapt the aspect ratio to the
    # size of the window, and not the opposite (which is the default
    # behaviour):
    kwargs['aspect'] = 'auto'
    kwargs['origin'] = 0
    plt.imshow(*args,**kwargs)
Project: mx-rfcn    Author: giorking
def visualize_gt_roidb(imdb, gt_roidb):
    """
    visualize gt roidb
    :param imdb: the imdb to be visualized
    :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
    :return: None
    """
    import matplotlib.pyplot as plt
    import skimage.io
    for i in range(len(gt_roidb)):
        im_path = imdb.image_path_from_index(imdb.image_set_index[i])
        im = skimage.io.imread(im_path)
        roi_rec = gt_roidb[i]
        plt.imshow(im)
        for bbox, gt_class, overlap in zip(roi_rec['boxes'], roi_rec['gt_classes'], roi_rec['gt_overlaps']):
            box = plt.Rectangle((bbox[0], bbox[1]),
                                bbox[2] - bbox[0],
                                bbox[3] - bbox[1], fill=False,
                                edgecolor='g', linewidth=3)
            plt.gca().add_patch(box)
            plt.gca().text(bbox[0], bbox[1], imdb.classes[gt_class] + ' {}'.format(overlap[0, gt_class]), color='w')
        plt.show()
Project: adversarial-frcnn    Author: xiaolonw
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: adversarial-frcnn    Author: xiaolonw
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
    """Visualize a mini-batch for debugging."""
    import matplotlib.pyplot as plt
    for i in xrange(rois_blob.shape[0]):
        rois = rois_blob[i, :]
        im_ind = rois[0]
        roi = rois[1:]
        im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
        im += cfg.PIXEL_MEANS
        im = im[:, :, (2, 1, 0)]
        im = im.astype(np.uint8)
        cls = labels_blob[i]
        plt.imshow(im)
        print 'class: ', cls, ' overlap: ', overlaps[i]
        plt.gca().add_patch(
            plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
                          roi[3] - roi[1], fill=False,
                          edgecolor='r', linewidth=3)
            )
        plt.show()
Project: pypiv    Author: jr7
def main():
    imgs = glob('images/real_ana_finger*')
    frames = [plt.imread(x) for x in imgs]

    frame_a = frames[0]
    frame_b = frames[1]

    piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
                            search_size=32, distance=16)
    u, v = piv.correlate_frames()

    adapt_piv = pypiv.AdaptivePIV(piv, window_size=32,
                                  search_size=32, distance=16,
                                  ipmethod='cubic')
    u, v = adapt_piv.correlate_frames()

    adapt_piv = pypiv.AdaptivePIV(piv, window_size=32,
                                  search_size=32, distance=8,
                                  ipmethod='cubic')
    u, v = adapt_piv.correlate_frames()

    plt.imshow(u)
    plt.clim(-5, 5)
    plt.show()
Project: PyBASC    Author: AkiNikolaidis
def test_cluster_matrix_average():

    import utils
    import basc
    import matplotlib.pyplot as plt


    blobs = generate_blobs()
    ism = utils.individual_stability_matrix(blobs, 100, 3)
    y_predict = utils.cluster_timeseries(blobs, 3, similarity_metric = 'correlation', affinity_threshold=0.0)
    cluster_voxel_scores, K_mask = utils.cluster_matrix_average(ism, y_predict)

    plt.imshow(K_mask)


#%% TEST BASC.PY
#Remaining Tests to write:
    #Join_group_stability
    #cluster_selection
    #individual_group_clustered_maps
    #ndarray_to_vol
Project: iFruitFly    Author: AdnanMuhib
def imagePloter(_feature_data, _im):
    fig = plt.figure();
    fig.add_subplot(1, 2, 1);
    print("Processing Please Wait...");
    im = misc.imread(_im);
    plt.imshow(im);
    n = 1;

    for cluster in _feature_data:
        points = cluster['points'];
        im = np.zeros((480, 640), dtype=int)
        im[points] = cluster['values']
        #plt.figure();
        #plt.imshow(im, cmap='gray')
        print("Writing Images...");
        fig.add_subplot(1, 2, 2);
        plt.imshow(im);
        prefix = re.split('IR_|.pgm', _im)[0];
        #print(prefix);
        postfix = re.split('IR_|.pgm', _im)[1]; 
        #print(postfix);
        plt.savefig(prefix + postfix + "_Cluster_" + str(n) + ".png");
        n = n + 1;
        print("Done..");
    return
Project: fast-rcnn-distillation    Author: xiaolonw
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: fast-rcnn-distillation    Author: xiaolonw
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
    """Visualize a mini-batch for debugging."""
    import matplotlib.pyplot as plt
    for i in xrange(rois_blob.shape[0]):
        rois = rois_blob[i, :]
        im_ind = rois[0]
        roi = rois[1:]
        im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
        im += cfg.PIXEL_MEANS
        im = im[:, :, (2, 1, 0)]
        im = im.astype(np.uint8)
        cls = labels_blob[i]
        plt.imshow(im)
        print 'class: ', cls, ' overlap: ', overlaps[i]
        plt.gca().add_patch(
            plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
                          roi[3] - roi[1], fill=False,
                          edgecolor='r', linewidth=3)
            )
        plt.show()
Project: faster-rcnn-resnet    Author: Eniac-Xie
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: faster-rcnn-resnet    Author: Eniac-Xie
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
    """Visualize a mini-batch for debugging."""
    import matplotlib.pyplot as plt
    for i in xrange(rois_blob.shape[0]):
        rois = rois_blob[i, :]
        im_ind = rois[0]
        roi = rois[1:]
        im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
        im += cfg.PIXEL_MEANS
        im = im[:, :, (2, 1, 0)]
        im = im.astype(np.uint8)
        cls = labels_blob[i]
        plt.imshow(im)
        print 'class: ', cls, ' overlap: ', overlaps[i]
        plt.gca().add_patch(
            plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
                          roi[3] - roi[1], fill=False,
                          edgecolor='r', linewidth=3)
            )
        plt.show()
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan
def CropLowerBoundary(img):
    # img_gray = ToGrayImage(path)
    _,img_bi = cv2.threshold(img,60,255,cv2.THRESH_BINARY)
    threshold_rate = 0.95
    threshold_row = -1
    row,col = img_bi.shape
    for tmp_r in range(row-1,-1,-1):
        tmp_sum = sum(img_bi[tmp_r])
        rate = float(tmp_sum)/255/col
        # print(rate)
        if rate>threshold_rate:
            threshold_row = tmp_r
            break
    img = img[0:threshold_row,:]
    # plt.imshow(img,"gray")
    # plt.show()
    return img
Project: DeepTextSpotter    Author: MichalBusta
def vis_square(data):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
       and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""

    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
               (0, 1), (0, 1))                 # add some space between filters
               + ((0, 0),) * (data.ndim - 3))  # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data, interpolation='nearest'); plt.axis('off')
Project: DeblurGAN    Author: KupynOrest
def __plot_canvas(self, show, save):
        if len(self.result) == 0:
            raise Exception('Please run blur_image() method first.')
        else:
            plt.close()
            plt.axis('off')
            fig, axes = plt.subplots(1, len(self.result), figsize=(10, 10))
            if len(self.result) > 1:
                for i in range(len(self.result)):
                        axes[i].imshow(self.result[i])
            else:
                plt.axis('off')

                plt.imshow(self.result[0])
            if show and save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)
                plt.show()
            elif save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)
            elif show:
                plt.show()
Project: handwritten-sequence-tensorflow    Author: johnsmithm
def fast_run(args):
    model = Model(args)
    feed = {}
    #feed[model.train_batch]=False
    xx,ss,yy=model.inputs(args.input_path)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    xxx,sss,yyy=sess.run([xx,ss,yy])
    #print(yyy)
    #print(yyy[1])
    print('len:',xxx.shape)
    import matplotlib.cm as cm
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.figure(figsize=(16,4))
    #plt.imshow()
    plt.imshow(np.asarray(xxx[0]).reshape((36,90))+0.5, interpolation='nearest', aspect='auto', cmap=cm.jet)
    plt.savefig("img.jpg")
    plt.clf() ; plt.cla()
Project: face_detection    Author: chintak
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print("no")
    if plot:
        plt.figure()
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1]
Project: latplan    Author: guicho271828
def plot_grid(images,w=10,path="plan.png",verbose=False):
    import matplotlib.pyplot as plt
    l = 0
    images = fix_images(images)
    l = len(images)
    h = int(math.ceil(l/w))
    plt.figure(figsize=(w*1.5, h*1.5))
    for i,image in enumerate(images):
        ax = plt.subplot(h,w,i+1)
        try:
            plt.imshow(image,interpolation='nearest',cmap='gray',)
        except TypeError:
            raise TypeError("Invalid dimensions for image data: image={}".format(np.array(image).shape))
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    print(path) if verbose else None
    plt.tight_layout()
    plt.savefig(path)
    plt.close()

# contiguous image
Project: latplan    Author: guicho271828
def plot_grid2(images,w=10,path="plan.png",verbose=False):
    import matplotlib.pyplot as plt
    images = fix_images(images)
    l = images.shape[0]
    h = int(math.ceil(l/w))
    margin = 3
    m_shape = (margin + np.array(images.shape[1:]))
    all_shape = m_shape * np.array((h,w))
    figure = np.ones(all_shape)
    print(images.shape,h,w,m_shape,figure.shape) if verbose else None
    for y in range(h):
        for x in range(w):
            begin = m_shape * np.array((y,x))
            end   = (m_shape * (np.array((y,x))+1)) - margin
            # print(begin,end,y*w+x)
            if y*w+x < len(images):
                figure[begin[0]:end[0],begin[1]:end[1]] = images[y*w+x]
    plt.figure(figsize=all_shape[::-1] * 0.01)
    plt.imshow(figure,interpolation='nearest',cmap='gray',)
    print(path) if verbose else None
    plt.tight_layout()
    plt.savefig(path)
Project: deep_learning_ex    Author: zatonovo
def display_classes(png, images, classes, ncol=4):
  """
  Draw a number of images and their predictions

  Example:
  images = data[1][:12]
  classes = model.predict_classes('classes.png', images)
  """
  fig = plt.figure()
  nrow = len(images) // ncol
  if len(images) % ncol > 0: nrow = nrow + 1

  def draw(i):
    plt.subplot(nrow, ncol, i + 1)  # subplot indices start at 1
    plt.imshow(images[i].reshape(28,28), cmap='gray', interpolation='none')
    plt.title('Predicted: %s' % classes[i])
  [ draw(i) for i in range(len(images)) ]
  plt.tight_layout()
  plt.savefig(png)
Project: reconstruction    Author: microelly2
def showHeightMap(x,y,z,zi):
    ''' show height map in maptplotlib '''
    zi=zi.transpose()

    plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
               extent=[ y.min(), y.max(),x.min(), x.max()])

    plt.colorbar()

    CS = plt.contour(zi,15,linewidths=0.5,colors='k',
               extent=[ y.min(), y.max(),x.min(), x.max()])
    CS = plt.contourf(zi,15,cmap=plt.cm.rainbow, 
               extent=[ y.min(), y.max(),x.min(), x.max()])

    z=z.transpose()
    plt.scatter(y, x, c=z)

    # invert axes
    #plt.gca().invert_xaxis()
    #plt.gca().invert_yaxis()

    plt.show()
    return
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "copied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print "showing"
            cv2.imshow(obj.Label,opening)
            print "shown"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=opening
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "copied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
        edges = cv2.Canny(img,obj.minVal,obj.maxVal)
        color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        edges=color

        if True:
            print "showing"
            cv2.imshow(obj.Label,edges)
            print "shown"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=edges
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "copied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = obj.iterations)


        if True:
            print "showing"
            cv2.imshow(obj.Label,closing)
            print "shown"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=closing
Project: reconstruction    Author: microelly2
def animpingpong(self):
        print self
        print self.Object
        print self.Object.Name
        obj=self.Object
        img = cv2.imread(obj.imageFile)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        dst = cv2.cornerHarris(gray,3,3,0.00001)
        dst = cv2.dilate(dst,None)
        img[dst>0.01*dst.max()]=[0,0,255]

        from matplotlib import pyplot as plt
        plt.subplot(121),plt.imshow(img,cmap = 'gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(dst,cmap = 'gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
Project: image-quantizer    Author: se7entyse7en
def render(self, show=True, new_figure=True):
        """Render the quantized image

        :param bool show: if the quantized image is also to be shown and not
                          only drawn.
        :param bool new_figure: if a new figure is to be used.

        """
        if new_figure:
            plt.figure()
            plt.clf()

        plt.title('{method} ({n_colors})'.format(
            method=self._method, n_colors=self._n_colors))
        plt.imshow(self._quantized_raster / 255.0)

        plt.draw()
        if show:
            plt.show()
Project: SelfDrivingCar    Author: aguijarro
def main():
    # prepare object points
    nx = 8  # TODO: enter the number of inside corners in x
    ny = 6  # TODO: enter the number of inside corners in y

    # Make a list of calibration images
    fname = './calibration_wide/GOPR0058.jpg'
    img = cv2.imread(fname)
    plt.imshow(img)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    # If found, draw corners
    if ret == True:
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
        plt.imshow(img)
        plt.show()