Python matplotlib.pyplot module: suptitle() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.suptitle().
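As a quick orientation before the project examples: plt.suptitle() sets a single figure-level title above all subplots, whereas ax.set_title() titles one individual axes. The sketch below is a minimal, self-contained illustration and is not taken from any of the projects listed on this page.

import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].plot(x, np.sin(x))
axes[0].set_title("sin(x)")          # per-axes title
axes[1].plot(x, np.cos(x))
axes[1].set_title("cos(x)")
plt.suptitle("One figure-level title via plt.suptitle()", fontsize=14)
plt.show()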

Project: ML-note    Author: JasonK93
def test_RandomForestRegressor_num(*data):
    '''
    test the performance with different n_estimators
    :param data: train_data, test_data, train_value, test_value
    :return: None
    '''
    X_train,X_test,y_train,y_test=data
    nums=np.arange(1,100,step=2)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for num in nums:
        regr=ensemble.RandomForestRegressor(n_estimators=num)
        regr.fit(X_train,y_train)
        training_scores.append(regr.score(X_train,y_train))
        testing_scores.append(regr.score(X_test,y_test))
    ax.plot(nums,training_scores,label="Training Score")
    ax.plot(nums,testing_scores,label="Testing Score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(-1,1)
    plt.suptitle("RandomForestRegressor")
    plt.show()
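The ML-note helpers on this page all follow the same pattern: they expect an unpacked train/test split and only plot the scores. A hypothetical driver is sketched below, assuming it lives in the same module as the helper above (so the np / plt / ensemble imports are shared); the use of load_diabetes is an assumption for illustration only, not part of the project.

import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

# Hypothetical driver: build the train/test split that the helper unpacks.
X, y = load_diabetes(return_X_y=True)
data = train_test_split(X, y, test_size=0.25, random_state=0)
test_RandomForestRegressor_num(*data)  # plots training/testing score vs n_estimators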
Project: ML-note    Author: JasonK93
def test_RandomForestRegressor_max_features(*data):
    '''
    test the performance with different max_features
    :param data:  train_data, test_data, train_value, test_value
    :return: None
    '''
    X_train,X_test,y_train,y_test=data
    max_features=np.linspace(0.01,1.0)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for max_feature in max_features:
        regr=ensemble.RandomForestRegressor(max_features=max_feature)
        regr.fit(X_train,y_train)
        training_scores.append(regr.score(X_train,y_train))
        testing_scores.append(regr.score(X_test,y_test))
    ax.plot(max_features,training_scores,label="Training Score")
    ax.plot(max_features,testing_scores,label="Testing Score")
    ax.set_xlabel("max_feature")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestRegressor")
    plt.show()
Project: nanoQC    Author: wdecoster
def per_base_sequence_content_and_quality(fqbin, qualbin, outdir, figformat):
    fig, axs = plt.subplots(2, 2, sharex='col', sharey='row')
    lines = plot_nucleotide_diversity(axs[0, 0], fqbin)
    plot_nucleotide_diversity(axs[0, 1], fqbin, invert=True)
    l_Q = plot_qual(axs[1, 0], qualbin)
    plot_qual(axs[1, 1], qualbin, invert=True)
    plt.setp([a.get_xticklabels() for a in axs[0, :]], visible=False)
    plt.setp([a.get_yticklabels() for a in axs[:, 1]], visible=False)
    for ax in axs[:, 1]:
        ax.set_ylabel('', visible=False)
    for ax in axs[0, :]:
        ax.set_xlabel('', visible=False)
    # Since axes are shared I should only invert once. Twice will restore the original axis order!
    axs[0, 1].invert_xaxis()
    plt.suptitle("Per base sequence content and quality")
    axl = fig.add_axes([0.4, 0.4, 0.2, 0.2])
    ax.plot()
    axl.axis('off')
    lines.append(l_Q)
    plt.legend(lines, ['A', 'T', 'G', 'C', 'Quality'], loc="center", ncol=5)
    plt.savefig(os.path.join(outdir, "PerBaseSequenceContentQuality." +
                             figformat), format=figformat, dpi=500)
Project: Google-QuickDraw    Author: ankonzoid
def plot_labeled_images_random(image_list, label_list, categories, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2*n, 2))
    #plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.set_title(categories[label_list[ind]], fontsize=20)
        ax.get_xaxis().set_visible(False); ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_unlabeled_images_random: plots unlabeled images at random
Project: Google-QuickDraw    Author: ankonzoid
def plot_unlabeled_images_random(image_list, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2*n, 2))
    plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.get_xaxis().set_visible(False); ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_compare: given test images and their reconstruction, we plot them for visual comparison
Project: onsager_deep_learning    Author: mborgerding
def show_shrinkage(shrink_func,theta,**kwargs):
    tf.reset_default_graph()
    tf.set_random_seed(kwargs.get('seed',1) )

    N = kwargs.get('N',500)
    L = kwargs.get('L',4)
    nsigmas = kwargs.get('sigmas',10)
    shape = (N,L)
    rvar = 1e-4
    r = np.reshape( np.linspace(0,nsigmas,N*L)*math.sqrt(rvar),shape)
    r_ = tfcf(r)
    rvar_ = tfcf(np.ones(L)*rvar)

    xhat_,dxdr_ = shrink_func(r_,rvar_ ,tfcf(theta))

    with tf.Session() as sess:
        sess.run( tf.global_variables_initializer() )
        xhat = sess.run(xhat_)
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(r.reshape(-1),r.reshape(-1),'y')
    plt.plot(r.reshape(-1),xhat.reshape(-1),'b')
    if 'title' in kwargs:  # dict.has_key() was removed in Python 3
        plt.suptitle(kwargs['title'])
    plt.show()
Project: LinearCorex    Author: gregversteeg
def plot_convergence(history, prefix='', prefix2=''):
    plt.figure(figsize=(8, 5))
    ax = plt.subplot(111)

    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    plt.plot(history["TC"], '-', lw=2.5, color=tableau20[0])
    x = len(history["TC"])
    y = np.max(history["TC"])
    plt.text(0.5 * x, 0.8 * y, "TC", fontsize=18, fontweight='bold', color=tableau20[0])

    if "additivity" in history:  # dict.has_key() was removed in Python 3
        plt.plot(history["additivity"], '-', lw=2.5, color=tableau20[1])
        plt.text(0.5 * x, 0.3 * y, "additivity", fontsize=18, fontweight='bold', color=tableau20[1])

    plt.ylabel('TC', fontsize=12, fontweight='bold')
    plt.xlabel('# Iterations', fontsize=12, fontweight='bold')
    plt.suptitle('Convergence', fontsize=12)
    filename = '{}/summary/convergence{}.pdf'.format(prefix, prefix2)
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    plt.savefig(filename, bbox_inches="tight")
    plt.close('all')
    return True
Project: trend_ml_toolkit_xgboost    Author: raymon-tian
def fea_plot(xg_model, feature, label, type = 'weight', max_num_features = None):
    fig, AX = plt.subplots(nrows=1, ncols=2)
    xgb.plot_importance(xg_model, xlabel=type, importance_type='weight', ax=AX[0], max_num_features=max_num_features)

    fscore = xg_model.get_score(importance_type=type)
    fscore = sorted(fscore.items(), key=itemgetter(1), reverse=True) # sort scores
    fea_index = get_fea_index(fscore, max_num_features)
    feature = feature[:, fea_index]
    dimension = len(fea_index)
    X = range(1, dimension+1)
    Yp = np.mean(feature[np.where(label==1)[0]], axis=0)
    Yn = np.mean(feature[np.where(label!=1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        Yp[i] /= param
        Yn[i] /= param
    p1 = AX[1].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[1].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[1].legend((p1,p2), ('Malware', 'Normal'))
    AX[1].set_title('Comparison of selected features by their means')
    AX[1].set_xlabel('Feature Index')
    AX[1].set_ylabel('Mean Value')
    AX[1].set_ylim(-1.1, 1.1)
    plt.xticks(X, fea_index+1, rotation=80)
    plt.suptitle('Feature Selection results')
Project: Yugioh-bot    Author: will7200
def compare_images(image_a, image_b, title):
    # compute the mean squared error and structural similarity
    # index for the images
    m = mse(image_a, image_b)
    s = compare_ssim(image_a, image_b, multichannel=True)

    # setup the figure
    fig = plt.figure(title)
    plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))

    # show first image
    ax = fig.add_subplot(1, 2, 1)
    plt.imshow(image_a, cmap=plt.cm.gray)
    plt.axis("off")

    # show the second image
    ax = fig.add_subplot(1, 2, 2)
    plt.imshow(image_b, cmap=plt.cm.gray)
    plt.axis("off")

    # show the images
    plt.show()
Project: ML-From-Scratch    Author: eriklindernoren
def save_imgs(self, epoch):
        r, c = 5, 5 # Grid size
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        # Generate images and reshape to image shape
        gen_imgs = self.generator.predict(noise).reshape((-1, self.img_rows, self.img_cols))

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Generative Adversarial Network")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i,j].imshow(gen_imgs[cnt,:,:], cmap='gray')
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("mnist_%d.png" % epoch)
        plt.close()
Project: ML-From-Scratch    Author: eriklindernoren
def save_imgs(self, epoch):
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, 100))
        gen_imgs = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Generative Adversarial Network")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i,j].imshow(gen_imgs[cnt,0,:,:], cmap='gray')
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("mnist_%d.png" % epoch)
        plt.close()
Project: sl-quant    Author: danielzak
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
    reward = 0
    signal.fillna(value=0, inplace=True)

    if eval == False:
        bt = twp.Backtest(pd.Series(data=[x for x in xdata[time_step-2:time_step]], index=signal[time_step-2:time_step].index.values), signal[time_step-2:time_step], signalType='shares')
        reward = ((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2])*bt.data['shares'].iloc[-1])

    if terminal_state == 1 and eval == True:
        #save a figure of the test set
        bt = twp.Backtest(pd.Series(data=[x for x in xdata], index=signal.index.values), signal, signalType='shares')
        reward = bt.pnl.iloc[-1]
        plt.figure(figsize=(3,4))
        bt.plotTrades()
        plt.axvline(x=400, color='black', linestyle='--')
        plt.text(250, 400, 'training data')
        plt.text(450, 400, 'test data')
        plt.suptitle(str(epoch))
        plt.savefig('plt/'+str(epoch)+'.png', bbox_inches='tight', pad_inches=1, dpi=72)
        plt.close('all')
    #print(time_step, terminal_state, eval, reward)

    return reward
Project: strategy    Author: kanghua309
def _render(self, mode='human', close=False):
        if self.inited == False: return
        if self.render_on == 0:
            # self.fig = plt.figure(figsize=(10, 4))
            self.fig = plt.figure(figsize=(12, 6))
            self.render_on = 1
            plt.ion()

        plt.clf()
        self._plot_trades()
        plt.suptitle("Code: " + self.src.symbol + ' ' + \
                     "Round:" + str(self.reset_count) + "-" + \
                     "Step:" + str(self.src.idx - self.src.orgin_idx) + "  (" + \
                     "from:" + self.src.reset_start_day + " " + \
                     "to:" + self.src.reset_end_day + ")")
        plt.pause(0.001)
        return self.fig
Project: elfi    Author: elfi-dev
def plot_marginals(self, selector=None, bins=20, axes=None, all=False, **kwargs):
        """Plot marginal distributions for parameters for all populations.

        Parameters
        ----------
        selector : iterable of ints or strings, optional
            Indices or keys to use from samples. Default to all.
        bins : int, optional
            Number of bins in histograms.
        axes : one or an iterable of plt.Axes, optional
        all : bool, optional
            Plot the marginals of all populations

        """
        if all is False:
            super(SmcSample, self).plot_marginals()
            return

        fontsize = kwargs.pop('fontsize', 13)
        for i, pop in enumerate(self.populations):
            pop.plot_marginals(selector=selector, bins=bins, axes=axes)
            plt.suptitle("Population {}".format(i), fontsize=fontsize)
Project: elfi    Author: elfi-dev
def plot_pairs(self, selector=None, bins=20, axes=None, all=False, **kwargs):
        """Plot pairwise relationships as a matrix with marginals on the diagonal.

        The y-axis of marginal histograms are scaled.

        Parameters
        ----------
        selector : iterable of ints or strings, optional
            Indices or keys to use from samples. Default to all.
        bins : int, optional
            Number of bins in histograms.
        axes : one or an iterable of plt.Axes, optional
        all : bool, optional
            Plot for all populations

        """
        if all is False:
            super(SmcSample, self).plot_pairs()
            return

        fontsize = kwargs.pop('fontsize', 13)
        for i, pop in enumerate(self.populations):
            pop.plot_pairs(selector=selector, bins=bins, axes=axes)
            plt.suptitle("Population {}".format(i), fontsize=fontsize)
Project: MarkovModels    Author: pmontalb
def plot_probability_distribution_function(self, initial_point=None, title=""):
        if initial_point is None:
            initial_point = self.x0

        fig = plt.figure()
        n_plots = 1 if self.total_cumulative_stochastic_kernels == {} else 2

        ax1 = fig.add_subplot(1, n_plots, 1)
        if n_plots > 1:
            ax2 = fig.add_subplot(1, n_plots, 2)
        for t in sorted(self.total_stochastic_kernels):
            ax1.plot(self.grid, self.total_stochastic_kernels[t][initial_point, :], label=str(t))
            if n_plots > 1:
                ax2.plot(self.grid, self.total_cumulative_stochastic_kernels[t][initial_point, :],
                         label=str(t))

        ax1.set_title("Probability Distribution Function")
        if n_plots > 1:
            ax2.set_title("Cumulative Distribution Function")

        if n_plots > 1:
            ax2.legend()

        plt.suptitle(title)
        plt.show()
Project: sparks    Author: ImpactHorizon
def save_heatmap(heatmap, mask):
    plt.clf()
    xmin, xmax, ymin, ymax = 0, heatmap.shape[1], heatmap.shape[0], 0
    extent = xmin, xmax, ymin, ymax
    alpha=1.0
    if mask is not None:
        alpha=0.5
        xmin, xmax, ymin, ymax = (0, max(heatmap.shape[1], mask.shape[1]), 
                                    max(heatmap.shape[0], mask.shape[0]), 0)
        extent = xmin, xmax, ymin, ymax
        plt.imshow(mask, extent=extent)
        plt.hold(True)
    plt.suptitle("Heatmap of sampled tiles.")
    plt.imshow(heatmap, cmap='gnuplot', interpolation='nearest', extent=extent,
                alpha=alpha)
    return plt
Project: evaluation-toolkit    Author: lightfield-analysis
def plot_pairwise_comparison(algo1, algo2, scenes, n_scenes_per_row=4, subdir="pairwise_diffs"):
    rows, cols = int(np.ceil(len(scenes) / float(n_scenes_per_row))), n_scenes_per_row
    fig = plt.figure(figsize=(4*cols, 3*rows))

    for idx_s, scene in enumerate(scenes):
        algo_result_1 = misc.get_algo_result(algo1, scene)
        algo_result_2 = misc.get_algo_result(algo2, scene)
        gt = scene.get_gt()

        plt.subplot(rows, cols, idx_s+1)
        cb = plt.imshow(np.abs(algo_result_1 - gt) - np.abs(algo_result_2 - gt),
                        interpolation="none", cmap=cm.seismic, vmin=-.1, vmax=.1)
        plt.colorbar(cb, shrink=0.7)
        plt.title(scene.get_display_name())

    # title
    a1 = algo1.get_display_name()
    a2 = algo2.get_display_name()
    plt.suptitle("|%s - GT| - |%s - GT|\nblue: %s is better, red: %s is better" % (a1, a2, a1, a2))

    fig_name = "pairwise_diffs_%s_%s" % (algo1.get_name(), algo2.get_name())
    fig_path = plotting.get_path_to_figure(fig_name, subdir=subdir)
    plotting.save_tight_figure(fig, fig_path, hide_frames=True, padding_top=0.85,
                               hspace=0.15, wspace=0.15)
Project: udacity-deep-learning    Author: hankcs
def load_and_display_pickle(datasets, sample_size, title=None):
    fig = plt.figure()
    if title: fig.suptitle(title, fontsize=16, fontweight='bold')
    num_of_images = []
    for pickle_file in datasets:
        with open(pickle_file, 'rb') as f:
            data = pickle.load(f)
            print('Total images in', pickle_file, ':', len(data))

            for index, image in enumerate(data):
                if index == sample_size: break
                ax = fig.add_subplot(len(datasets), sample_size, sample_size * datasets.index(pickle_file) +
                                     index + 1)
                ax.imshow(image)
                ax.set_axis_off()
                ax.imshow(image)

            num_of_images.append(len(data))

    balance_check(num_of_images)
    plt.show()
    return num_of_images
Project: segmentation    Author: zengyu714
def show_slices(im_3d, indices=None):
    """ Function to display slices of 3-d image """

    plt.rcParams['image.cmap'] = 'gray'

    if indices is None:
        indices = np.array(im_3d.shape) // 2
    assert len(indices) == 3, """Expected 3-d array indexing, but received
    %d-d indexing.""" % len(indices)

    x_th, y_th, z_th = indices
    fig, axes = plt.subplots(1, 3)
    axes[0].imshow(im_3d[x_th, :, :])
    axes[1].imshow(im_3d[:, y_th, :])
    axes[2].imshow(im_3d[:, :, z_th])
    plt.suptitle("Center slices for spine image")
Project: spyking-circus-ort    Author: spyking-circus
def plot_waveforms(self):
        nb_cells = len(self.cells)
        nb_cols = int(np.sqrt(nb_cells - 1)) + 1
        nb_rows = (nb_cells - 1) // nb_cols + 1  # integer division so plt.subplot() receives an int
        plt.figure()
        for cell in self.cells.values():  # itervalues() is Python 2 only
            plt.subplot(nb_rows, nb_cols, cell.id + 1)
            t_min = 0.0
            t_max = float(81) / self.sampling_rate
            t = np.linspace(t_min, t_max, num=81)
            w = cell.sample(0.0, t)
            t = 1.0e3 * t
            plt.plot(t, w, color=cell.color)
            plt.xlim(t[0], t[-1])
        plt.suptitle(r"Waveforms")
        plt.tight_layout()
        plt.subplots_adjust(top=0.92)
        return
Project: hco-experiments    Author: zooniverse
def plot_pred_vs_image(img,preds_df,out_name):
    # function to plot predictions vs image
    f, axarr = plt.subplots(2, 1)
    plt.suptitle("ResNet50- PreTrained on ImageNet")
    axarr[0].imshow(img)
    sns.set_style("whitegrid")
    pl = sns.barplot(data = preds_df, x='Score', y='Species')
    axarr[1] = sns.barplot(data = preds_df, x='Score', y='Species',)
    axarr[0].autoscale(enable=False)
    axarr[0].get_xaxis().set_ticks([])
    axarr[0].get_yaxis().set_ticks([])
    axarr[1].autoscale(enable=False)
    gs = gridspec.GridSpec(2,1, width_ratios=[1],height_ratios=[1,0.1])
    plt.tight_layout()
    plt.savefig(out_name + '.png')


#########################
# Models
#########################

# load model
Project: hco-experiments    Author: zooniverse
def plot_pdf(score_export, fname, swap=None, cutoff=1):
    cut_data = np.array([p for g, p in score_export.roc() if p < cutoff])

    plots = ['density', 'kde']
    n = len(plots)

    for i, f in enumerate(plots):
        plt.subplot(n, 1, i + 1)
        if f == 'density':
            plot_seaborn_density(cut_data)
        elif f == 'split':
            plot_seaborn_density_split(swap, cutoff)
        elif f == 'kde':
            plot_kde(cut_data)

    plt.suptitle('Probability Density Function')
    plt.tight_layout()
    plt.subplots_adjust(top=0.93)

    if fname:
        plt.savefig(fname, dpi=300)
    else:
        plt.show()
Project: flight-data-processor    Author: junzis
def filterplot(ts, alts, spds, rocs, fltr, fltrname):
    ts_f, alts_f = fltr.filter(ts, alts)
    ts_f, spds_f = fltr.filter(ts, spds)
    ts_f, rocs_f = fltr.filter(ts, rocs)

    plt.suptitle(fltrname)
    plt.subplot(311)
    plt.plot(ts, alts, '.', color='blue', alpha=0.5)
    plt.plot(ts_f, alts_f, '-', color='red')
    plt.xlabel('time (s)')

    plt.subplot(312)
    plt.plot(ts, spds, '.', color='green', alpha=0.5)
    plt.plot(ts_f, spds_f, '-', color='red')
    plt.xlabel('time (s)')


    plt.subplot(313)
    plt.plot(ts, rocs, '.', color='blue', alpha=0.5)
    plt.plot(ts_f, rocs_f, '-', color='red')
    plt.xlabel('time (s)')
Project: dynamic-systems-and-chaos    Author: madrisan
def plot(self):
        """Plot a Final State Diagram """

        self.getxy()

        plt.suptitle('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')
        plt.title('Final State Diagram for the ' + self.map_longname)

        plt.xlim([self.map_ymin, self.map_ymax])
        plt.ylim([0, 1.])
        plt.yticks([])

        plt.grid(True)

        plt.plot([self.map_ymin, self.map_ymax], [.5, .5],
                 color='black', lw=1)
        plt.plot(self.x[self.s:], self.y1[self.s:], color='black', linestyle='',
                 markerfacecolor='black', marker='o', markersize=8)
        plt.text(.1 * self.map_ymax, .4, 'r = %g' % self.r, style='italic',
                 bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})

        plt.show()
Project: dynamic-systems-and-chaos    Author: madrisan
def plot(self):
        plt.suptitle('Dynamic Systems and Chaos', fontsize=14, fontweight='bold')
        plt.title('Bifurcation Diagram for the ' + self.map_longname)

        plt.xlim([self.rmin, self.rmax])
        plt.xticks([round(i, 1) for i in np.linspace(self.rmin, self.rmax, 5)])
        plt.xlabel('r')

        plt.ylim([self.ymin, self.ymax])
        plt.ylabel('final states')

        for r in np.linspace(self.rmin, self.rmax, 1000):
            x, y = FinalState(r, self.n, .5, self.s, self.map_name).getxy(r)
            plt.plot(y[self.s:], x[self.s:], color='black', linestyle='',
                     markerfacecolor='black', marker=',', markersize=1)

        plt.show()
Project: ML-note    Author: JasonK93
def test_RandomForestClassifier_num(*data):
    '''
    test the performance with different n_estimators
    :param data: train_data, test_data, train_value, test_value
    :return: None
    '''
    X_train,X_test,y_train,y_test=data
    nums=np.arange(1,100,step=2)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for num in nums:
        clf=ensemble.RandomForestClassifier(n_estimators=num)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(nums,training_scores,label="Training Score")
    ax.plot(nums,testing_scores,label="Testing Score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_RandomForestClassifier_max_depth(*data):
    '''
    test the performance with different max_depth
    :param data:  train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    maxdepths=range(1,20)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for max_depth in maxdepths:
        clf=ensemble.RandomForestClassifier(max_depth=max_depth)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(maxdepths,training_scores,label="Training Score")
    ax.plot(maxdepths,testing_scores,label="Testing Score")
    ax.set_xlabel("max_depth")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_RandomForestClassifier_max_features(*data):
    '''
    test the performance with different max_features
    :param data: train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    max_features=np.linspace(0.01,1.0)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for max_feature in max_features:
        clf=ensemble.RandomForestClassifier(max_features=max_feature)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(max_features,training_scores,label="Training Score")
    ax.plot(max_features,testing_scores,label="Testing Score")
    ax.set_xlabel("max_feature")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingClassifier_maxdepth(*data):
    '''
    test the performance with different max_depth
    :param data:     train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    maxdepths=np.arange(1,20)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for maxdepth in maxdepths:
        clf=ensemble.GradientBoostingClassifier(max_depth=maxdepth,max_leaf_nodes=None)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(maxdepths,training_scores,label="Training Score")
    ax.plot(maxdepths,testing_scores,label="Testing Score")
    ax.set_xlabel("max_depth")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("GradientBoostingClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingClassifier_learning(*data):
    '''
    test the performance with different learning rate
    :param data:     train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    learnings=np.linspace(0.01,1.0)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for learning in learnings:
        clf=ensemble.GradientBoostingClassifier(learning_rate=learning)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(learnings,training_scores,label="Training Score")
    ax.plot(learnings,testing_scores,label="Testing Score")
    ax.set_xlabel("learning_rate")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("GradientBoostingClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingClassifier_subsample(*data):
    '''
    test the performance with different subsample
    :param data:    train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    subsamples=np.linspace(0.01,1.0)
    testing_scores=[]
    training_scores=[]
    for subsample in subsamples:
            clf=ensemble.GradientBoostingClassifier(subsample=subsample)
            clf.fit(X_train,y_train)
            training_scores.append(clf.score(X_train,y_train))
            testing_scores.append(clf.score(X_test,y_test))
    ax.plot(subsamples,training_scores,label="Training Score")
    ax.plot(subsamples,testing_scores,label="Testing Score")
    ax.set_xlabel("subsample")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("GradientBoostingClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingClassifier_max_features(*data):
    '''
    test the performance with different max_features
    :param data:     train_data, test_data, train_value, test_value
    :return:   None
    '''
    X_train,X_test,y_train,y_test=data
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    max_features=np.linspace(0.01,1.0)
    testing_scores=[]
    training_scores=[]
    for features in max_features:
            clf=ensemble.GradientBoostingClassifier(max_features=features)
            clf.fit(X_train,y_train)
            training_scores.append(clf.score(X_train,y_train))
            testing_scores.append(clf.score(X_test,y_test))
    ax.plot(max_features,training_scores,label="Training Score")
    ax.plot(max_features,testing_scores,label="Testing Score")
    ax.set_xlabel("max_features")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("GradientBoostingClassifier")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingRegressor_num(*data):
    '''
    test the performance with different n_estimators
    :param data:  train_data, test_data, train_value, test_value
    :return:   None
    '''
    X_train,X_test,y_train,y_test=data
    nums=np.arange(1,200,step=2)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for num in nums:
        regr=ensemble.GradientBoostingRegressor(n_estimators=num)
        regr.fit(X_train,y_train)
        training_scores.append(regr.score(X_train,y_train))
        testing_scores.append(regr.score(X_test,y_test))
    ax.plot(nums,training_scores,label="Training Score")
    ax.plot(nums,testing_scores,label="Testing Score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("GradientBoostingRegressor")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingRegressor_learning(*data):
    '''
    test the performance with different learning rate
    :param data:   train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    learnings=np.linspace(0.01,1.0)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for learning in learnings:
        regr=ensemble.GradientBoostingRegressor(learning_rate=learning)
        regr.fit(X_train,y_train)
        training_scores.append(regr.score(X_train,y_train))
        testing_scores.append(regr.score(X_test,y_test))
    ax.plot(learnings,training_scores,label="Training Score")
    ax.plot(learnings,testing_scores,label="Testing Score")
    ax.set_xlabel("learning_rate")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(-1,1.05)
    plt.suptitle("GradientBoostingRegressor")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingRegressor_subsample(*data):
    '''
    test the performance with different subsample
    :param data:    train_data, test_data, train_value, test_value
    :return:  None
    '''
    X_train,X_test,y_train,y_test=data
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    subsamples=np.linspace(0.01,1.0,num=20)
    testing_scores=[]
    training_scores=[]
    for subsample in subsamples:
            regr=ensemble.GradientBoostingRegressor(subsample=subsample)
            regr.fit(X_train,y_train)
            training_scores.append(regr.score(X_train,y_train))
            testing_scores.append(regr.score(X_test,y_test))
    ax.plot(subsamples,training_scores,label="Training Score")
    ax.plot(subsamples,testing_scores,label="Testing Score")
    ax.set_xlabel("subsample")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(-1,1.05)
    plt.suptitle("GradientBoostingRegressor")
    plt.show()
Project: ML-note    Author: JasonK93
def test_GradientBoostingRegressor_max_features(*data):
    '''
    test the performance with different max_features
    :param data:  train_data, test_data, train_value, test_value
    :return: None
    '''
    X_train,X_test,y_train,y_test=data
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    max_features=np.linspace(0.01,1.0)
    testing_scores=[]
    training_scores=[]
    for features in max_features:
            regr=ensemble.GradientBoostingRegressor(max_features=features)
            regr.fit(X_train,y_train)
            training_scores.append(regr.score(X_train,y_train))
            testing_scores.append(regr.score(X_test,y_test))
    ax.plot(max_features,training_scores,label="Training Score")
    ax.plot(max_features,testing_scores,label="Testing Score")
    ax.set_xlabel("max_features")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("GradientBoostingRegressor")
    plt.show()
Project: CoherentXrayImaging    Author: susannahammarberg
def plot_crystal3D_reciprocal():
    plt.figure()
    #plt.title('hej')
    plt.subplot(221)
    plt.imshow(np.log10(abs(crystal3D_fourier[:,:,174])),  cmap='gray')
    #plt.title('xy plane cut')
    plt.xlabel(' x')
    plt.ylabel(' y')
    plt.colorbar()

    plt.subplot(222)
    plt.imshow(np.log10(abs(crystal3D_fourier[173,:,:])),  cmap='gray')
    #plt.title('xz plane ')
    plt.xlabel(' z')
    plt.ylabel(' x')    # correct
    plt.colorbar()

    plt.subplot(223)
    plt.imshow(np.log10(abs(crystal3D_fourier[:,173,:])), cmap='gray')
    #plt.title('yz plane')
    plt.xlabel(' z')
    plt.ylabel(' y')
    plt.colorbar()

    plt.suptitle('Plane cuts of crystal in reciprocal space')
Project: Parallel-SGD    Author: angadgill
def show_with_diff(image, reference, title):
    """Helper function to display denoising"""
    plt.figure(figsize=(5, 3.3))
    plt.subplot(1, 2, 1)
    plt.title('Image')
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.subplot(1, 2, 2)
    difference = image - reference

    plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
    plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
Project: Parallel-SGD    Author: angadgill
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for i, comp in enumerate(images):
        plt.subplot(n_row, n_col, i + 1)
        vmax = max(comp.max(), -comp.min())
        plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
                   interpolation='nearest',
                   vmin=-vmax, vmax=vmax)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)

###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
Project: Parallel-SGD    Author: angadgill
def plot_time_vs_s(time, norm, point_labels, title):
    plt.figure()
    colors = ['g', 'b', 'y']
    for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":  # "is not" checks identity, not string equality
            plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
        else:
            plt.plot(time[l], norm[l], label=l, marker='^', c='red')

        for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
            plt.annotate(label, xy=(x, y), xytext=(0, -20),
                         textcoords='offset points', ha='right', va='bottom')
    plt.legend(loc="upper right")
    plt.suptitle(title)
    plt.ylabel("norm discrepancy")
    plt.xlabel("running time [s]")
Project: Parallel-SGD    Author: angadgill
def scatter_time_vs_s(time, norm, point_labels, title):
    plt.figure()
    size = 100
    for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":  # "is not" checks identity, not string equality
            plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
            for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
                plt.annotate(label, xy=(x, y), xytext=(0, -80),
                             textcoords='offset points', ha='right',
                             arrowprops=dict(arrowstyle="->",
                                             connectionstyle="arc3"),
                             va='bottom', size=11, rotation=90)
        else:
            plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
            for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
                plt.annotate(label, xy=(x, y), xytext=(0, 30),
                             textcoords='offset points', ha='right',
                             arrowprops=dict(arrowstyle="->",
                                             connectionstyle="arc3"),
                             va='bottom', size=11, rotation=90)

    plt.legend(loc="best")
    plt.suptitle(title)
    plt.ylabel("norm discrepancy")
    plt.xlabel("running time [s]")
Project: tf-sparql    Author: derdav3
def plot_res(test_err, train_batch_loss, benchmark_err, epoch):
    flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]

    test_x_val = np.array(list(x * 3 for x in range(0, len(test_err))))

    plt.plot(train_batch_loss[0],train_batch_loss[1], label="Training error", c=flatui[1], alpha=0.5)
    plt.plot(test_x_val, np.array(test_err), label="Test error", c=flatui[0])
    plt.axhline(y=benchmark_err[1], linestyle='dashed', label="No-modell error", c=flatui[2])
    plt.axhline(y=0.098, linestyle='dashed', label="State of the art error", c=flatui[3])

    plt.suptitle("Model error - cold queries")
    plt.yscale('log', nonposy='clip')
    plt.xlim([0,epoch+1])
    # second_axes = plt.twinx() # create the second axes, sharing x-axis
    # second_axes.set_yticks([0.2,0.4]) # list of your y values
    plt.xlabel('epoch')
    plt.ylabel('error')
    plt.legend(loc='upper right')
    plt.show()
Project: openanalysis    Author: OpenWeavers
def compare(algorithms, pts=2000, maxrun=5, progress=True):
        """
        Compares the given list of Searching algorithms and Plots a bar chart

        :param algorithms: List of Searching algorithms
        :param pts: Number of elements in testing array
        :param maxrun: Number of iterations to take average
        :param progress: Whether to show Progress bar or not
         """
        arr = np.arange(pts)
        algorithms = [x() for x in algorithms]
        operations = {x.name: 0 for x in algorithms}
        print('Please wait while comparing Searching Algorithms')
        if progress:
            import progressbar
            count = 0
            max_count = maxrun * len(algorithms)
            bar = progressbar.ProgressBar(max_value=max_count)
        for _ in range(maxrun):
            key = np.random.randint(0, 2000)
            for algorithm in algorithms:
                if progress:
                    count += 1
                    bar.update(count)
                algorithm.search(arr, key)
                operations[algorithm.name] += algorithm.count
        operations = [(k, v / maxrun) for k, v in operations.items()]
        plt.suptitle('Searching Algorithm Comparison\nAveraged over {} loops'.format(maxrun))
        rects = plt.bar(left=np.arange(len(operations)), height=[y for (x, y) in operations])
        plt.xticks(np.arange(len(operations)), [x for (x, y) in operations])
        ax = plt.axes()
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                    '%d' % int(height),
                    ha='center', va='bottom')
        plt.ylabel('Average number of basic operations')
        plt.show()
Project: openanalysis    Author: OpenWeavers
def compare(algorithms, pts=2000, maxrun=5, progress=True):
        """
        Compares the given list of Sorting algorithms and plots a bar chart

        :param algorithms: List of Sorting algorithms
        :param pts: Number of elements in testing array
        :param maxrun: Number of iterations to take average
        :param progress: Whether to show progress bar or not
        """
        base_arr = np.arange(pts)
        np.random.shuffle(base_arr)
        algorithms = [x() for x in algorithms]  # Instantiate
        operations = {x.name: 0 for x in algorithms}
        print('Please wait while comparing Sorting Algorithms')
        if progress:
            import progressbar
            count = 0
            max_count = maxrun * len(algorithms)
            bar = progressbar.ProgressBar(max_value=max_count)
        for _ in range(maxrun):
            for algorithm in algorithms:
                if progress:
                    count += 1
                    bar.update(count)
                algorithm.sort(base_arr)
                operations[algorithm.name] += algorithm.count
                np.random.shuffle(base_arr)
        operations = [(k, v / maxrun) for k, v in operations.items()]
        plt.suptitle('Sorting Algorithm Comparison\nAveraged over {} loops'.format(maxrun))
        rects = plt.bar(left=np.arange(len(operations)), height=[y for (x, y) in operations])
        plt.xticks(np.arange(len(operations)), [x for (x, y) in operations])
        ax = plt.axes()
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                    '%d' % int(height),
                    ha='center', va='bottom')
        plt.ylabel('Average number of basic operations')
        plt.show()
Project: histwords    Author: williamleif
def plot_words(word1, words, fitted, cmap, sims):
    # TODO: remove this and just set the plot axes directly
    plt.scatter(fitted[:,0], fitted[:,1], alpha=0)
    plt.suptitle("%s" % word1, fontsize=30, y=0.1)
    plt.axis('off')

    annotations = []
    isArray = type(word1) == list
    for i in xrange(len(words)):
        pt = fitted[i]

        ww,decade = [w.strip() for w in words[i].split("|")]
        color = cmap((int(decade) - 1840) / 10 + CMAP_MIN)
        word = ww
        sizing = sims[words[i]] * 30

        # word1 is the word we are plotting against
        if ww == word1 or (isArray and ww in word1):
            annotations.append((ww, decade, pt))
            word = decade
            color = 'black'
            sizing = 15


        plt.text(pt[0], pt[1], word, color=color, size=int(sizing))

    return annotations
Project: tf-image-segmentation    Author: VittalP
def _discrete_matshow_adaptive(data, labels_names=[], title=""):
    """Displays segmentation results using colormap that is adapted
    to a number of classes. Uses labels_names to write class names
    aside the color label. Used as a helper function for 
    visualize_segmentation_adaptive() function.

    Parameters
    ----------
    data : 2d numpy array (width, height)
        Array with integers representing class predictions
    labels_names : list
        List with class_names
    """

    fig_size = [7, 6]
    plt.rcParams["figure.figsize"] = fig_size

    #get discrete colormap
    cmap = plt.get_cmap('Paired', np.max(data)-np.min(data)+1)

    # set limits .5 outside true range
    mat = plt.matshow(data,
                      cmap=cmap,
                      vmin = np.min(data)-.5,
                      vmax = np.max(data)+.5)

    #tell the colorbar to tick at integers
    cax = plt.colorbar(mat,
                       ticks=np.arange(np.min(data),np.max(data)+1))

    # The names to be printed aside the colorbar
    if labels_names:
        cax.ax.set_yticklabels(labels_names)

    if title:
        plt.suptitle(title, fontsize=15, fontweight='bold')

    plt.show()
Project: NLP-JD    Author: ZexinYan
def show_heat_map(self):
            pd.set_option('precision', 2)
            plt.figure(figsize=(20, 6))
            sns.heatmap(self.data.corr(), square=True)
            plt.xticks(rotation=90)
            plt.yticks(rotation=360)
            plt.suptitle("Correlation Heatmap")
            plt.show()
Project: NLP-JD    Author: ZexinYan
def show_heat_map_to(self, target='sentiment'):
            correlations = self.data.corr()[target].sort_values(ascending=False)
            plt.figure(figsize=(40, 6))
            correlations.drop(target).plot.bar()
            pd.set_option('precision', 2)
            plt.xticks(rotation=90, fontsize=7)
            plt.yticks(rotation=360)
            plt.suptitle('The Heatmap of Correlation With ' + target)
            plt.show()
Project: brainiak    Author: brainiak
def plot_confusion_matrix(cm, title="Confusion Matrix"):
    """Plots a confusion matrix for each subject
    """
    import matplotlib.pyplot as plt
    import math
    plt.figure()
    subjects = len(cm)
    root_subjects = math.sqrt(subjects)
    cols = math.ceil(root_subjects)
    rows = math.ceil(subjects/cols)
    classes = cm[0].shape[0]
    for subject in range(subjects):
        plt.subplot(rows, cols, subject+1)
        plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone)
        plt.xticks(np.arange(classes), range(1, classes+1))
        plt.yticks(np.arange(classes), range(1, classes+1))
        cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
        cbar.set_clim(0.0, 1.0)
        plt.xlabel("Predicted")
        plt.ylabel("True label")
        plt.title("{0:d}".format(subject + 1))
    plt.suptitle(title)
    plt.tight_layout()
    plt.show()

# Load the input data that contains the image stimuli and its labels for training a classifier