Python matplotlib.pyplot module: title() example source code

From open-source Python projects, we extracted the following 50 code examples that illustrate how matplotlib.pyplot.title() is used.
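
Before the project excerpts, here is a minimal, self-contained sketch of the call pattern they all share; the data and label strings are illustrative assumptions rather than code taken from any of the listed projects:

import matplotlib.pyplot as plt
import numpy as np

# Illustrative data, assumed for this sketch only
x = np.linspace(0, 2 * np.pi, 200)
plt.plot(x, np.sin(x), label='sin(x)')

# plt.title() titles the current Axes; fontsize and loc are optional keyword arguments
plt.title('Sine wave', fontsize=14, loc='center')
plt.xlabel('x')
plt.ylabel('sin(x)')
plt.legend(loc='best')
plt.show()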

Project: torch_light    Author: ne7ermore
def imshow(tensor, imsize=512, title=None):
    image = tensor.clone().cpu()
    image = image.view(*tensor.size())
    image = transforms.ToPILImage()(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(5)
Project: lang-reps    Author: chaitanyamalaviya
def plot_sent_trajectories(sents, decode_plot):

    font = {'family' : 'normal',
            'size'   : 14}

    matplotlib.rc('font', **font) 
    i = 0    
    l = ["Portuguese","Catalan"]

    axes = plt.gca()
    #axes.set_xlim([xmin,xmax])
    axes.set_ylim([-1,1])

    for sent, enc in zip(sents, decode_plot):
        if i == 2: continue  # only the first two sentences (one per label) are plotted
        i += 1
        #times = np.arange(len(enc))
        times = np.linspace(0,1,len(enc))
        plt.plot(times, enc, label=l[i-1])
    plt.title("Hidden Node Trajectories")
    plt.xlabel('timestep')
    plt.ylabel('trajectories')
    plt.legend(loc='best')
    plt.savefig("final_tests/cr_por_cat_hidden_cell_trajectories", bbox_inches="tight")
    plt.close()
Project: hippylib    Author: hippylib
def plot_pts(points, values, colorbar=True, subplot_loc=None, mytitle=None, show_axis='on', vmin=None, vmax=None, xlim=(0,1), ylim=(0,1)):
    if subplot_loc is not None:
        plt.subplot(subplot_loc)

    pp = plt.scatter(points[:,0], points[:,1], c=values.get_local(), marker=",", s=20, vmin=vmin, vmax=vmax)

    plt.axis(show_axis)

    if colorbar:
        plt.colorbar(pp, fraction=.1, pad=0.2)
    else:
        plt.gca().set_aspect('equal')

    if mytitle is not None:
        plt.title(mytitle, fontsize=20)

    if xlim is not None:
        plt.xlim(xlim)

    if ylim is not None:
        plt.ylim(ylim)

    return pp
Project: dpl    Author: ppengtang
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: fingerprint-securedrop    Author: freedomofpress
def plot_ROC(test_labels, test_predictions):
    fpr, tpr, thresholds = metrics.roc_curve(
        test_labels, test_predictions, pos_label=1)
    auc = "%.2f" % metrics.auc(fpr, tpr)
    title = 'ROC Curve, AUC = '+str(auc)
    with plt.style.context(('ggplot')):
        fig, ax = plt.subplots()
        ax.plot(fpr, tpr, "#000099", label='ROC curve')
        ax.plot([0, 1], [0, 1], 'k--', label='Baseline')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.legend(loc='lower right')
        plt.title(title)
    return fig
Project: HousePricePredictionKaggle    Author: Nuwantha
def get_feature_importance(list_of_features):
    n_estimators=10000
    random_state=0
    n_jobs=4
    x_train=data_frame[list_of_features]
    y_train=data_frame.iloc[:,-1]
    feat_labels= data_frame.columns[1:]
    forest = BaggingRegressor(n_estimators=n_estimators,random_state=random_state,n_jobs=n_jobs) 
    forest.fit(x_train,y_train) 
    importances=forest.feature_importances_ 
    indices = np.argsort(importances)[::-1]


    for f in range(x_train.shape[1]):
        print("%2d) %-*s %f" % (f+1,30,feat_labels[indices[f]],
                                        importances[indices[f]]))


    plt.title("Feature Importance")
    plt.bar(range(x_train.shape[1]),importances[indices],color='lightblue',align='center')
    plt.xticks(range(x_train.shape[1]),feat_labels[indices],rotation=90)
    plt.xlim([-1,x_train.shape[1]])
    plt.tight_layout()
    plt.show()
Project: facebook-message-analysis    Author: szheng17
def plot_bar_chart(label_to_value, title, x_label, y_label):
    """
    Plots a bar chart from a dict.

    Args:
        label_to_value: A dict mapping ints or strings to numerical values (int
            or float).
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    n = len(label_to_value)
    labels = sorted(label_to_value.keys())
    values = [label_to_value[label] for label in labels]
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.bar(range(n), values, align='center')
    plt.xticks(range(n), labels, rotation='vertical', fontsize='7')
    plt.gcf().subplots_adjust(bottom=0.2) # make room for x-axis labels
    plt.show()
Project: facebook-message-analysis    Author: szheng17
def plot_line_graph_multiple_lines(x, label_to_values, title, x_label, y_label):
    if not all(len(x) == len(values) for values in label_to_values.values()):
        raise ValueError('values of label_to_values must have length len(x)')
    colors = ['b','g','r','c','m','y','k']
    line_styles = ['-','--',':']
    for (i, label) in enumerate(sorted(label_to_values.keys())):
        color = colors[i%len(colors)]
        line_style = line_styles[(i//len(colors))%len(line_styles)]
        plt.plot(x,
                 label_to_values[label],
                 label=label,
                 color=color,
                 linestyle=line_style)
    plt.legend(loc='center left', bbox_to_anchor=(1,0.5), prop={'size':9})
    plt.tight_layout(pad=9)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()

# x_min, x_max for example proportion_initiated_by_user
Project: facebook-message-analysis    Author: szheng17
def plot_histogram(x, n_bins, title, x_label, y_label):
    """
    Plots a histogram from a list of data.

    Args:
        x: A list of floats representing the data.
        n_bins: An int representing the number of bins to plot.
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.hist(x, bins=n_bins)
    plt.show()

    # probability
Project: voxcelchain    Author: hiroaki-kaneda
def create_graph():
    logfile = 'result/log'
    xs = []
    ys = []
    ls = []
    f = open(logfile, 'r')
    data = json.load(f)

    print(data)

    for d in data:
        xs.append(d["iteration"])
        ys.append(d["main/accuracy"])
        ls.append(d["main/loss"])

    plt.clf()
    plt.cla()
    plt.hlines(1, 0, np.max(xs), colors='r', linestyles="dashed")  # dashed reference line at y=1
    plt.title(r"loss/accuracy")
    plt.plot(xs, ys, label="accuracy")
    plt.plot(xs, ls, label="loss")
    plt.legend()
    plt.savefig("result/log.png")
Project: bnn-analysis    Author: myshkov
def plot_hist(baseline_samples, target_samples, true_x, true_y):
    baseline_samples = baseline_samples.squeeze()
    target_samples = target_samples.squeeze()

    bmin, bmax = baseline_samples.min(), baseline_samples.max()

    ax = sns.kdeplot(baseline_samples, shade=True, color=(0.6, 0.1, 0.1, 0.2))
    ax = sns.kdeplot(target_samples, shade=True, color=(0.1, 0.1, 0.6, 0.2))
    ax.set_xlim(bmin, bmax)

    y0, y1 = ax.get_ylim()

    plt.plot([true_y, true_y], [0, y1 - (y1 - y0) * 0.01], linewidth=1, color='r')
    plt.title('Predictive' + (f' at {true_x:.2f}' if true_x is not None else ''))

    fig = plt.gcf()
    fig.set_size_inches(9, 9)
    # plt.tight_layout()  # pad=0.4, w_pad=0.5, h_pad=1.0)

    name = utils.DATA_DIR.replace('/', '-')
    # plt.tight_layout(pad=0.6)
    utils.save_fig('predictive-at-point-' + name)
Project: vae-npvc    Author: JeremyCCHsu
def plot_spectra(results):
    plt.figure(figsize=(10, 4))
    plt.imshow(
        np.concatenate(
            [np.flipud(results['x'].T),
             np.flipud(results['xh'].T),
             np.flipud(results['x_conv'].T)],
            0),
        aspect='auto',
        cmap='jet',
    )
    plt.colorbar()
    plt.title('Upper: Real input; Mid: Reconstruction; Lower: Conversion to target.')
    plt.savefig(
        os.path.join(
            args.logdir,
            '{}.png'.format(
                os.path.split(str(results['f'], 'utf-8'))[-1]
            )
        )
    )
Project: pybot    Author: spillai
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    f = plt.figure(1)
    f.clf()
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
Project: pybot    Author: spillai
def plot_confusion_matrix(cm, clf_target_names, title='Confusion matrix', cmap=plt.cm.jet):
    target_names = map(lambda key: key.replace('_','-'), clf_target_names)

    for idx in range(len(cm)): 
        cm[idx,:] = (cm[idx,:] * 100.0 / np.sum(cm[idx,:])).astype(np.int)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.matshow(cm)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(clf_target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    # plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Project: pycma    Author: CMA-ES
def plot_axes_scaling(self, iabscissa=1):
        from matplotlib import pyplot
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
            pyplot.text(0, dat.D[-1, 5],
                        'all axes scaling values equal to %s'
                        % str(dat.D[-1, 5]),
                        verticalalignment='center')
            return self  # nothing interesting to plot
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        # pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principal Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
Project: CausalGAN    Author: mkocaoglu
def scatter2d(x,y,title='2dscatterplot',xlabel=None,ylabel=None):
    fig=plt.figure()
    plt.scatter(x,y)
    plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)

    if not 0<=np.min(x)<=np.max(x)<=1:
        raise ValueError('summary_scatter2d title:',title,' input x exceeded [0,1] range.\
                         min:',np.min(x),' max:',np.max(x))
    if not 0<=np.min(y)<=np.max(y)<=1:
        raise ValueError('summary_scatter2d title:',title,' input y exceeded [0,1] range.\
                         min:',np.min(y),' max:',np.max(y))

    plt.xlim([0,1])
    plt.ylim([0,1])
    return fig
Project: mx-lsoftmax    Author: luoyetx
def plot_beta():
    '''plot beta over training
    '''
    beta = args.beta
    scale = args.scale
    beta_min = args.beta_min
    num_epoch = args.num_epoch
    epoch_size = int(float(args.num_examples) / args.batch_size)

    x = np.arange(num_epoch*epoch_size)
    y = beta * np.power(scale, x)
    y = np.maximum(y, beta_min)
    epoch_x = np.arange(num_epoch) * epoch_size
    epoch_y = beta * np.power(scale, epoch_x)
    epoch_y = np.maximum(epoch_y, beta_min)

    # plot beta descent curve
    plt.semilogy(x, y)
    plt.semilogy(epoch_x, epoch_y, 'ro')
    plt.title('beta descent')
    plt.ylabel('beta')
    plt.xlabel('epoch')
    plt.show()
Project: SGAN    Author: YuhangSong
def plt_to_vis(fig,win,name):
    canvas=fig.canvas
    import io
    buf = io.BytesIO()
    canvas.print_png(buf)
    data=buf.getvalue()
    buf.close()

    buf=io.BytesIO()
    buf.write(data)
    img=Image.open(buf)
    img = np.asarray(img)/255.0
    img = img.astype(float)[:,:,0:3]
    img = torch.FloatTensor(img).permute(2,0,1)
    vis.image(  img,
                win=str(MULTI_RUN)+'-'+win,
                opts=dict(title=str(MULTI_RUN)+'-'+name))
Project: code-uai16    Author: thanhan
def get_pub_dic_xml(file_name = 'data/proton-beam-all.xml'):
    tree = ET.parse(file_name)
    root = tree.getroot()[0]

    # Create dic of : id -> text features
    pub_dic = {}
    for pub in root:
        rec_number = int (get_text (pub.find('rec-number')))
        abstract   = get_text (pub.find('abstract'))
        title      = get_text (pub.find('titles')[0])
        text = title + abstract
        for kw in pub.find('keywords'):
            text = text + kw.text + ' '
        pub_dic[rec_number] = text

    return pub_dic
Project: code-uai16    Author: thanhan
def get_pub_dic_csv(dataset):
    filename = "data/" + dataset + "-text.csv"
    f = open(filename)
    f.readline()
    csv_reader = csv.reader(f)

    # Create dic of : id -> text features
    pub_dic = {}

    for row in csv_reader:
        if dataset.startswith("RCT"):
            (abstract_id, abstract, title) = tuple(row)[0:3]
        else:
            (abstract_id, title, publisher, abstract) = tuple(row)[0:4]

        abstract_id = int(abstract_id)
        text = title + abstract

        pub_dic[abstract_id] = text

    return pub_dic
Project: bob.bio.base    Author: bioidiap
def _plot_cmc(cmcs, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'lower right'
  # open new page for current plot
  figure = pyplot.figure()

  max_R = 0
  # plot the CMC curves
  for i in range(len(cmcs)):
    probs = bob.measure.cmc(cmcs[i])
    R = len(probs)
    pyplot.semilogx(range(1, R+1), probs, figure=figure, color=colors[i], label=labels[i])
    max_R = max(R, max_R)

  # change axes accordingly
  ticks = [int(t) for t in pyplot.xticks()[0]]
  pyplot.xlabel('Rank')
  pyplot.ylabel('Probability')
  pyplot.xticks(ticks, [str(t) for t in ticks])
  pyplot.axis([0, max_R, -0.01, 1.01])
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: bob.bio.base    Author: bioidiap
def _plot_epc(scores_dev, scores_eval, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'upper center'
  # open new page for current plot
  figure = pyplot.figure()

  # plot the DET curves
  for i in range(len(scores_dev)):
    x,y = bob.measure.epc(scores_dev[i][0], scores_dev[i][1], scores_eval[i][0], scores_eval[i][1], 100)
    pyplot.plot(x, y, color=colors[i], label=labels[i])

  # change axes accordingly
  pyplot.xlabel('alpha')
  pyplot.ylabel('HTER')
  pyplot.title(title)
  pyplot.axis([-0.01, 1.01, -0.01, 0.51])
  pyplot.grid(True)
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: fingerprint-securedrop    Author: freedomofpress
def plot_feature_importances(feature_names, feature_importances, N=30):
    importances = list(zip(feature_names, list(feature_importances)))
    importances = pd.DataFrame(importances, columns=["Feature", "Importance"])
    importances = importances.set_index("Feature")

    # Sort by the absolute value of the importance of the feature
    importances["sort"] = abs(importances["Importance"])
    importances = importances.sort(columns="sort", ascending=False).drop("sort", axis=1)
    importances = importances[0:N]

    # Show the most important positive feature at the top of the graph
    importances = importances.sort(columns="Importance", ascending=True)

    with plt.style.context(('ggplot')):
        fig, ax = plt.subplots(figsize=(16,12))
        ax.tick_params(labelsize=16)
        importances.plot(kind="barh", legend=False, ax=ax)
        ax.set_frame_on(False)
        ax.set_xlabel("Relative importance", fontsize=20)
        ax.set_ylabel("Feature name", fontsize=20)
    plt.tight_layout()
    plt.title("Most important features for attack", fontsize=20).set_position([.5, 0.99])
    return fig
Project: pyrsss    Author: butala
def plot_grid2D(lons, lats, tec_grid2D, datetime, title_label = ''):

    LATS, LONS = np.meshgrid(lats, lons)

    m = Basemap(llcrnrlon=-180,
                llcrnrlat=-55,
                urcrnrlon=180,
                urcrnrlat=75,
                projection='merc',
                area_thresh=1000,
                resolution='i')

    m.drawstates()
    m.drawcountries()
    m.drawcoastlines()

    parallels = np.arange(-90,90,20)
    m.drawparallels(parallels,labels=[True,False,False,True])
    meridians = np.arange(0,360,40)
    m.drawmeridians(meridians,labels=[True,False,False,True])

    m.scatter(LONS, LATS, c=tec_grid2D, latlon = True, linewidths=0, s=5)
    m.colorbar()

    plt.title('%s\n%s' % (title_label, datetime.isoformat(' ')))
Project: Flavor-Network    Author: lingcheng99
def plot_confusion_matrix(cm, col, title, cmap=plt.cm.viridis):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    for i in range(cm.shape[0]):
        plt.annotate("%.2f" %cm[i][i],xy=(i,i),
                    horizontalalignment='center',
                    verticalalignment='center')
    plt.title(title,fontsize=18)
    plt.colorbar(fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(col.unique()))
    plt.xticks(tick_marks, sorted(col.unique()),rotation=90)
    plt.yticks(tick_marks, sorted(col.unique()))
    plt.tight_layout()
    plt.ylabel('True label',fontsize=18)
    plt.xlabel('Predicted label',fontsize=18)

#using flavor network to project recipes from ingredient matrix to flavor matrix
Project: AnomalyDetection    Author: JayZhuCoding
def plot_training_parameters(self):
        fr = open("training_param.csv", "r")
        fr.readline()
        lines = fr.readlines()
        fr.close()
        n = 100
        nu = np.empty(n, dtype=np.float64)
        gamma = np.empty(n, dtype=np.float64)
        diff = np.empty([n, n], dtype=np.float64)
        for row in range(len(lines)):
            m = lines[row].strip().split(",")
            i = row / n
            j = row % n
            nu[i] = Decimal(m[0])
            gamma[j] = Decimal(m[1])
            diff[i][j] = Decimal(m[2])
        plt.pcolor(gamma, nu, diff, cmap="coolwarm")
        plt.title("The Difference of Guassian Classifier with Different nu, gamma")
        plt.xlabel("gamma")
        plt.ylabel("nu")
        plt.xscale("log")
        plt.yscale("log")
        plt.colorbar()
        plt.show()
Project: BISIP    Author: clberube
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: BISIP    Author: clberube
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: adversarial-frcnn    Author: xiaolonw
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: Machine-Learning    Author: grasses
def show(self):
        keys = []
        values = []
        for (k, v) in self.letter_db.iteritems():
            total = v['total']
            right = v['right']
            keys.append(k)
            values.append(100 * float(right / float(total)))

        groups = len(self.letter_db)
        index = np.arange(groups)
        width = 0.5
        opacity = 0.4

        plt.bar(index, values, linewidth = width, alpha = opacity, color = 'b', label = 'right rate')

        plt.xlabel('letter')
        plt.ylabel('predict right rate (%)')
        plt.title('Writer identify: letter right rate')
        plt.xticks(index + width, keys)
        plt.ylim(0, 100)
        plt.legend()
        plt.show()
Project: MLPractices    Author: carefree0910
def draw_results(self):
        metrics_log, cost_log = {}, {}
        for key, value in sorted(self._logs.items()):
            metrics_log[key], cost_log[key] = value[:-1], value[-1]

        for i, name in enumerate(sorted(self._metric_names)):
            plt.figure()
            plt.title("Metric Type: {}".format(name))
            for key, log in sorted(metrics_log.items()):
                xs = np.arange(len(log[i])) + 1
                plt.plot(xs, log[i], label="Data Type: {}".format(key))
            plt.legend(loc=4)
            plt.show()
            plt.close()

        plt.figure()
        plt.title("Cost")
        for key, loss in sorted(cost_log.items()):
            xs = np.arange(len(loss)) + 1
            plt.plot(xs, loss, label="Data Type: {}".format(key))
        plt.legend()
        plt.show()
Project: MLPractices    Author: carefree0910
def get_graphs_from_logs():
        with open("Results/logs.dat", "rb") as file:
            logs = pickle.load(file)
        for (hus, ep, bt), log in logs.items():
            hus = list(map(lambda _c: str(_c), hus))
            title = "hus: {} ep: {} bt: {}".format(
                "- " + " -> ".join(hus) + " -", ep, bt
            )
            fb_log, acc_log = log["fb_log"], log["acc_log"]
            xs = np.arange(len(fb_log)) + 1
            plt.figure()
            plt.title(title)
            plt.plot(xs, fb_log)
            plt.plot(xs, acc_log, c="g")
            plt.savefig("Results/img/" + "{}_{}_{}".format(
                "-".join(hus), ep, bt
            ))
            plt.close()
Project: structured-output-ae    Author: sbelharbi
def plot_cdf_model_and_meansh(self, cdfs, tag, cdf0_1s, aucs, bx, dx):
        plt.close("all")
        x = np.arange(0, bx, dx)
        fig, ax = plt.subplots(nrows=1, ncols=1)
        ax.plot(x, cdfs[0], label="CDF model")
        ax.plot(x, cdfs[1], label="CDF mean shape")
        ax.grid(True)
        plt.xlabel("NRMSE")
        plt.ylabel("Data proportion")
        plt.legend(loc=4, prop={'size': 8}, fancybox=True, shadow=True)
        plt.title(
            "CDF curve: " + tag + ". Model: CDF0.1: " +
            str(prec2 % cdf0_1s[0]) + " . AUC:" + str(prec2 % aucs[0]) +
            ".\n" + ". MSh: CDF0.1: " +
            str(prec2 % cdf0_1s[1]) + " . AUC:" + str(prec2 % aucs[1]) + ".\n")
        return fig
Project: structured-output-ae    Author: sbelharbi
def plot_errors(self, valid, train, path, epoc=-1):
        '''Plot and save the figure of the error over the validation and train sets calculated during gradient descent. ***** CLASSIFICATION

        The figure won't be displayed.
        valid: list of errors over the validation set
        train: list of errors over the train set
        path: path where to save the figure.
        epoc: epoch at which learning stopped; if >= 0, a vertical "stop learning" marker is drawn.
        '''
        fig = plt.figure()
        train_gp, = plt.plot(train, '-r')
        valid_gp, = plt.plot(valid, '-*g')
        if epoc >= 0:
            epoc = epoc - 1 # ploting starts from 0
            stop, = plt.plot([epoc, epoc], [0, max(valid + train) + 5], '--b', lw=2)
            plt.legend([train_gp, valid_gp, stop], ['train error', 'valid error', 'stop learning, epoch='+str(epoc + 1)], fancybox=True, shadow=True)
        else:
            plt.legend([train_gp, valid_gp], ['train error', 'valid error'], fancybox=True, shadow=True)

        plt.title('Train/valid error during the gradient descent')
        plt.xlabel(u"n° epoch")
        plt.ylabel('Error (100 - accuracy) %')
        fig.savefig(path, bbox_inches='tight')
        # to display the figure
        #plt.show()
Project: fast-rcnn-distillation    Author: xiaolonw
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: faster-rcnn-resnet    Author: Eniac-Xie
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()
Project: audio_scripts    Author: audiofilter
def save_fft(fil,audio_in):
    samples = len(audio_in)
    fft_size = 2**int(floor(log(samples)/log(2.0)))
    freq = fft(audio_in[0:fft_size])
    s_data = numpy.zeros(fft_size/2)
    x_data = numpy.zeros(fft_size/2)
    peak = 0;
    for j in xrange(fft_size/2):
        if (abs(freq[j]) > peak):
            peak = abs(freq[j])

    for j in xrange(fft_size/2):
        x_data[j] = log(2.0*(j+1.0)/fft_size);
        if (x_data[j] < -10):
            x_data[j] = -10
        s_data[j] = 10.0*log(abs(freq[j])/peak)/log(10.0)
    plt.ylim([-50,0])
    plt.plot(x_data,s_data)
    plt.title('fft log power')
    plt.grid()

    fields = fil.split('.')
    plt.savefig(fields[0]+'_fft.png', bbox_inches="tight")
    plt.clf()
    plt.close()
Project: kmeans-service    Author: MAYHEM-Lab
def plot_spatial_cluster_fig(data, covar_type_tied_labels_k):
    """ Creates a 3x2 plot spatial plot using labels as the color """
    sns.set(context='talk', style='white')
    data.columns = [c.lower() for c in data.columns]
    fig = plt.figure()
    placement = {'full': {True: 1, False: 4}, 'diag': {True: 2, False: 5}, 'spher': {True: 3, False: 6}}

    lim_left = data['longitude'].min()
    lim_right = data['longitude'].max()
    lim_bottom = data['latitude'].min()
    lim_top = data['latitude'].max()
    for covar_type, covar_tied, labels, k in covar_type_tied_labels_k:
        plt.subplot(2, 3, placement[covar_type][covar_tied])
        plt.scatter(data['longitude'], data['latitude'], c=labels, cmap=plt.cm.rainbow, s=10)
        plt.xlim(left=lim_left, right=lim_right)
        plt.ylim(bottom=lim_bottom, top=lim_top)
        plt.xticks([])
        plt.yticks([])
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.title('{}-{}, K={}'.format(covar_type.capitalize(), ['Untied', 'Tied'][covar_tied], k))
    plt.tight_layout()
    return fig
Project: OpenAPS    Author: medicinexlab
def _get_old_pred(bg_df, start_index, end_index, num_pred_minutes):
    #The number of 5 minute sections until the prediction (e.g. 30 minutes = 6 sections)
    pred_array_index = num_pred_minutes / DATA_SPACING

    actual_bg_array, actual_bg_time_array, eventual_pred_array, eventual_pred_time_array, iob_pred_array, iob_pred_time_array, cob_pred_array, cob_pred_time_array, acob_pred_array, acob_pred_time_array = _get_raw_pred_array(bg_df, start_index, end_index, pred_array_index)

    eventual_pred_data = _find_compare_array(actual_bg_array, actual_bg_time_array, eventual_pred_array, eventual_pred_time_array, 30)
    iob_pred_data = _find_compare_array(actual_bg_array, actual_bg_time_array, iob_pred_array, iob_pred_time_array, num_pred_minutes)
    cob_pred_data= _find_compare_array(actual_bg_array, actual_bg_time_array, cob_pred_array, cob_pred_time_array, num_pred_minutes)
    acob_pred_data = _find_compare_array(actual_bg_array, actual_bg_time_array, acob_pred_array, acob_pred_time_array, num_pred_minutes)

    return eventual_pred_data, iob_pred_data, cob_pred_data, acob_pred_data


#Plots old pred data given namedtuple of old data (eventualBG, acob, cob, or iob).
#Can show or save prediction plot based on show_pred_plot or save_pred_plot, respectively.
#Same goes for the Clarke Error grid with show_clarke_plot or save_clarke_plot, respectively.
#id_str, algorithm_str, minutes_str are strings of the ID, the prediction algorithm and the number of prediction minutes used for the title.
Project: OpenAPS    Author: medicinexlab
def _plot_old_pred_data(old_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, algorithm_str, minutes_str):
    actual_bg_array = old_pred_data.result_actual_bg_array
    actual_bg_time_array = old_pred_data.result_actual_bg_time_array
    pred_array = old_pred_data.result_pred_array
    pred_time_array = old_pred_data.result_pred_time_array

    #Root mean squared error
    rms = math.sqrt(metrics.mean_squared_error(actual_bg_array, pred_array))
    print "                Root Mean Squared Error: " + str(rms)
    print "                Mean Absolute Error: " + str(metrics.mean_absolute_error(actual_bg_array, pred_array))
    print "                R^2 Coefficient of Determination: " + str(metrics.r2_score(actual_bg_array, pred_array))

    plot, zone = ClarkeErrorGrid.clarke_error_grid(actual_bg_array, pred_array, id_str + " " + algorithm_str + " " + minutes_str)
    print "                Percent A:{}".format(float(zone[0]) / (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Percent C, D, E:{}".format(float(zone[2] + zone[3] + zone[4])/ (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Zones are A:{}, B:{}, C:{}, D:{}, E:{}\n".format(zone[0],zone[1],zone[2],zone[3],zone[4])
    if save_clarke_plot: plt.savefig(id_str + algorithm_str.replace(" ", "") + minutes_str + "clarke.png")
    if show_clarke_plot: plot.show()

    plt.clf()
    plt.plot(actual_bg_time_array, actual_bg_array, label="Actual BG", color='black', linestyle='-')
    plt.plot(pred_time_array, pred_array, label="BG Prediction", color='black', linestyle=':')
    plt.title(id_str + " " + algorithm_str + " " + minutes_str + " BG Analysis")
    plt.ylabel("Blood Glucose Level (mg/dl)")
    plt.xlabel("Time (minutes)")
    plt.legend(loc='upper left')

    # SHOW/SAVE PLOT DEPENDING ON THE BOOLEAN PARAMETER
    if save_pred_plot: plt.savefig(id_str + algorithm_str.replace(" ","") + minutes_str + "plot.png")
    if show_pred_plot: plt.show()


#Function to analyze the old OpenAPS data
Project: LinearCorex    Author: gregversteeg
def plot_heatmaps(data, mis, column_label, cont, topk=30, prefix=''):
    cmap = sns.cubehelix_palette(as_cmap=True, light=.9)
    m, nv = mis.shape
    for j in range(m):
        inds = np.argsort(- mis[j, :])[:topk]
        if len(inds) >= 2:
            plt.clf()
            order = np.argsort(cont[:,j])
            subdata = data[:, inds][order].T
            subdata -= np.nanmean(subdata, axis=1, keepdims=True)
            subdata /= np.nanstd(subdata, axis=1, keepdims=True)
            columns = [column_label[i] for i in inds]
            sns.heatmap(subdata, vmin=-3, vmax=3, cmap=cmap, yticklabels=columns, xticklabels=False, mask=np.isnan(subdata))
            filename = '{}/heatmaps/group_num={}.png'.format(prefix, j)
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            plt.title("Latent factor {}".format(j))
            plt.yticks(rotation=0)
            plt.savefig(filename, bbox_inches='tight')
            plt.close('all')
            #plot_rels(data[:, inds], map(lambda q: column_label[q], inds), colors=cont[:, j],
            #          outfile=prefix + '/relationships/group_num=' + str(j), latent=labels[:, j], alpha=0.1)
Project: LinearCorex    Author: gregversteeg
def plot_top_relationships(data, corex, labels, column_label, topk=5, prefix=''):
    dual = (corex.moments['X_i Y_j'] * corex.moments['X_i Z_j']).T
    alpha = dual > 0.04
    cy = corex.moments['ry']
    m, nv = alpha.shape
    for j in range(m):
        inds = np.where(alpha[j] > 0)[0]
        inds = inds[np.argsort(- dual[j][inds])][:topk]
        if len(inds) >= 2:
            if dual[j, inds[0]] > 0.1:
                factor = labels[:, j]
                title = '$Y_{%d}$' % j
            else:
                k = np.argmax(np.abs(cy[j]))
                if k == j:
                    k = np.argsort(-np.abs(cy[j]))[1]
                factor = corex.moments['X_i Z_j'][inds[0], j] * labels[:, j] + corex.moments['X_i Z_j'][inds[0], k] * labels[:, k]
                title = '$Y_{%d} + Y_{%d}$' % (j, k)
            plot_rels(data[:, inds], map(lambda q: column_label[q], inds), colors=factor,
                      outfile=prefix + '/relationships/group_num=' + str(j), title=title)
Project: stock-eagle    Author: mtusman
def get_stock(symbol):
    last_year_date = datetime.strftime(datetime.now() - relativedelta(years=1), "%Y-%m-%d")
    date = get_last_trading_date()
    url = requests.get('https://www.quandl.com/api/v3/datasets/WIKI/{}.json?start_date={}&end_date={}'.format(symbol, last_year_date, date))
    json_dataset = url.json()
    json_data = json_dataset['dataset']['data']
    dates = []  
    closing = []
    for day in json_data:
        dates.append(datetime.strptime(day[0], "%Y-%m-%d"))
        closing.append(day[4])
    plt.plot_date(dates, closing, '-')
    plt.title(symbol)
    plt.xlabel('Date')
    plt.ylabel('Stock Price')
    plt.savefig('foo.png')
Project: face_detection    Author: chintak
def plot_learning_curve(_, history, folder, debug=True):
    arr = np.asarray(
        map(lambda k: [k['epoch'], k['train_loss'], k['valid_loss']], history))
    plt.figure()
    plt.plot(arr[:, 0], arr[:, 1], 'r', marker='o',
             label='Training loss', linewidth=2.0)
    plt.plot(arr[:, 0], arr[:, 2], 'b', marker='o',
             label='Validation loss', linewidth=2.0)
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.ylim([0.0, np.max(arr[:, 1:]) * 1.3])
    plt.title('Learning curve')
    plt.legend()
    if not debug:
        plt.savefig('%s/learning_curve.png' % folder, bbox_inches='tight')
        plt.close()
Project: ndparse    Author: neurodata
def display_pr_curve(precision, recall):
    # following examples from sklearn

    # f1 operating point: compute the best F1 over the curve for use in the title below
    import numpy as np
    import pylab as plt

    precision = np.asarray(precision, dtype=float)
    recall = np.asarray(recall, dtype=float)
    with np.errstate(divide='ignore', invalid='ignore'):
        max_f1 = np.nanmax(2 * precision * recall / (precision + recall))
    # Plot Precision-Recall curve
    plt.clf()
    plt.plot(recall, precision, label='Precision-Recall curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall example: Max f1={0:0.2f}'.format(max_f1))
    plt.legend(loc="lower left")
    plt.show()
Project: Supply-demand-forecasting    Author: LevinJ
def traffic_districution(self):
        data_dir = g_singletonDataFilePath.getTrainDir()
        df = self.load_trafficdf(data_dir)
        print df['traffic'].describe()
#         sns.distplot(self.gapdf['gap'],kde=False, bins=100);
        df['traffic'].plot(kind='hist', bins=100)
        plt.xlabel('Traffic')
        plt.title('Histogram of Traffic')

        return
#     def disp_gap_bydistrict(self, disp_ids = np.arange(34,67,1), cls1 = 'start_district_id', cls2 = 'time_id'):
# #         disp_ids = np.arange(1,34,1)
#         plt.figure()
#         by_district = self.gapdf.groupby(cls1)
#         size = len(disp_ids)
# #         size = len(by_district)
#         col_len = row_len = math.ceil(math.sqrt(size))
#         count = 1
#         for name, group in by_district:
#             if not name in disp_ids:
#                 continue
#             plt.subplot(row_len, col_len, count)
#             group.groupby(cls2)['gap'].mean().plot()
#             count += 1   
#         return
Project: Supply-demand-forecasting    Author: LevinJ
def disp_gap_byweather(self):
        df = self.gapdf
        data_dir = g_singletonDataFilePath.getTrainDir()
        dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_prevweather.df.pickle'
        dumpload = DumpLoad(dumpfile_path)
        if dumpload.isExisiting():
            temp_df = dumpload.load()
        else:
            weather_dict = self.get_weather_dict(data_dir)

            temp_df = self.X_y_Df['time_slotid'].apply(self.find_prev_weather_mode, weather_dict=weather_dict)     
            dumpload.dump(temp_df)

        df = pd.concat([df, temp_df],  axis=1)

        gaps_mean = df.groupby('preweather')['gap'].mean()
        gaps_mean.plot(kind='bar')
        plt.ylabel('Mean of gap')
        plt.xlabel('Weather')
        plt.title('Weather/Gap Correlation')
        return
Project: Supply-demand-forecasting    Author: LevinJ
def disp_gap_bydate(self):
        gaps_mean = self.gapdf.groupby('time_date')['gap'].mean()
        gaps_mean.plot(kind='bar')
        plt.ylabel('Mean of gap')
        plt.title('Date/Gap Correlation')
#         for i in gaps_mean.index:
#             plt.plot([i,i], [0, gaps_mean[i]], 'k-')
        plt.show()
        return

#     def drawGapDistribution(self):
#         self.gapdf[self.gapdf['gapdf'] < 10]['gapdf'].hist(bins=50)
# #         sns.distplot(self.gapdf['gapdf']);
# #         sns.distplot(self.gapdf['gapdf'], hist=True, kde=False, rug=False)
# #         plt.hist(self.gapdf['gapdf'])
#         plt.show()
#         return
#     def drawGapCorrelation(self):
#         _, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
#         res = self.gapdf.groupby('start_district_id')['gapdf'].sum()
#         ax1.bar(res.index, res.values)
#         res = self.gapdf.groupby('time_slotid')['gapdf'].sum()
#         ax2.bar(res.index.map(lambda x: x[11:]), res.values)
#         plt.show()
#         return
Project: deep-summarization    Author: harpribot
def plot_one_metric(self, models_metric, title):
        """

        :param models_metric:
        :param title:
        :return:
        """
        for index, model_metric in enumerate(models_metric):
            plt.plot(self.steps, model_metric, label=self.file_desc[index])
        plt.title(title)
        plt.legend()
        plt.xlabel('Number of batches')
        plt.ylabel('Score')
Project: lang-reps    Author: chaitanyamalaviya
def plot_trajectories(src_sent, src_encoding, idx):

    # encoding is (time_steps, hidden_dim)
    #pca = PCA(n_components=1)

    #pca_result = pca.fit_transform(src_encoding)
    times = np.arange(src_encoding.shape[0])
    plt.plot(times, src_encoding)
    plt.title(" ".join(src_sent))
    plt.xlabel('timestep')
    plt.ylabel('trajectories')
    plt.savefig("misc_hidden_cell_trajectories_"+str(idx), bbox_inches="tight")
    plt.close()