Python matplotlib.pyplot module: xlabel() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use matplotlib.pyplot.xlabel().

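Before the project snippets, a minimal self-contained sketch (with made-up data) shows the basic call: plt.xlabel() sets the label of the x-axis of the current axes and accepts the usual matplotlib text keyword arguments such as fontsize.

import matplotlib.pyplot as plt

plt.plot([0, 1, 2, 3], [0, 1, 4, 9])   # illustrative data only
plt.xlabel('x value', fontsize=12)     # label the x-axis of the current axes
plt.ylabel('x squared')
plt.show()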
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def plot_sent_trajectories(sents, decode_plot):

    font = {'family' : 'normal',
            'size'   : 14}

    matplotlib.rc('font', **font) 
    i = 0    
    l = ["Portuguese","Catalan"]

    axes = plt.gca()
    #axes.set_xlim([xmin,xmax])
    axes.set_ylim([-1,1])

    for sent, enc in zip(sents, decode_plot):
        if i == 2:  # only the first two sentences (Portuguese, Catalan) are plotted
            continue
        i += 1
        #times = np.arange(len(enc))
        times = np.linspace(0,1,len(enc))
        plt.plot(times, enc, label=l[i-1])
    plt.title("Hidden Node Trajectories")
    plt.xlabel('timestep')
    plt.ylabel('trajectories')
    plt.legend(loc='best')
    plt.savefig("final_tests/cr_por_cat_hidden_cell_trajectories", bbox_inches="tight")
    plt.close()
Project: fingerprint-securedrop    Author: freedomofpress    | Project source | File source
def plot_ROC(test_labels, test_predictions):
    fpr, tpr, thresholds = metrics.roc_curve(
        test_labels, test_predictions, pos_label=1)
    auc = "%.2f" % metrics.auc(fpr, tpr)
    title = 'ROC Curve, AUC = '+str(auc)
    with plt.style.context(('ggplot')):
        fig, ax = plt.subplots()
        ax.plot(fpr, tpr, "#000099", label='ROC curve')
        ax.plot([0, 1], [0, 1], 'k--', label='Baseline')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.legend(loc='lower right')
        plt.title(title)
    return fig
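A minimal usage sketch for the function above; the labels and scores below are hypothetical illustration data, and the call assumes plot_ROC and scikit-learn's metrics module are importable as in the snippet.

import numpy as np

test_labels = np.array([0, 0, 1, 1, 0, 1])               # hypothetical ground truth
test_scores = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.9])  # hypothetical classifier scores
fig = plot_ROC(test_labels, test_scores)
fig.savefig('roc_curve.png', bbox_inches='tight')        # hypothetical output filename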
Project: pyballd    Author: Yurlungur    | Project source | File source
def plot_interpolation(orderx,ordery):
    s = PseudoSpectralDiscretization2D(orderx,XMIN,XMAX,
                                ordery,YMIN,YMAX)
    Xc,Yc = s.get_x2d()
    x = np.linspace(XMIN,XMAX,100)
    y = np.linspace(YMIN,YMAX,100)
    Xf,Yf = np.meshgrid(x,y,indexing='ij')
    f_coarse = f(Xc,Yc)
    f_interpolator = s.to_continuum(f_coarse)
    f_num = f_interpolator(Xf,Yf)
    plt.pcolor(Xf,Yf,f_num)
    cb = plt.colorbar()
    cb.set_label('interpolated function',fontsize=16)
    plt.xlabel('x')
    plt.ylabel('y')
    for postfix in ['.png','.pdf']:
        name = 'orthopoly_interpolated_function'+postfix
        if USE_FIGS_DIR:
            name = 'figs/' + name
        plt.savefig(name,
                    bbox_inches='tight')
    plt.clf()
Project: facebook-message-analysis    Author: szheng17    | Project source | File source
def plot_bar_chart(label_to_value, title, x_label, y_label):
    """
    Plots a bar chart from a dict.

    Args:
        label_to_value: A dict mapping ints or strings to numerical values (int
            or float).
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    n = len(label_to_value)
    labels = sorted(label_to_value.keys())
    values = [label_to_value[label] for label in labels]
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.bar(range(n), values, align='center')
    plt.xticks(range(n), labels, rotation='vertical', fontsize='7')
    plt.gcf().subplots_adjust(bottom=0.2) # make room for x-axis labels
    plt.show()
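A minimal usage sketch for plot_bar_chart; the dictionary and labels below are hypothetical.

word_counts = {'hello': 12, 'thanks': 7, 'bye': 3}  # hypothetical label -> value mapping
plot_bar_chart(word_counts, title='Message word counts',
               x_label='Word', y_label='Count')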
Project: facebook-message-analysis    Author: szheng17    | Project source | File source
def plot_line_graph_multiple_lines(x, label_to_values, title, x_label, y_label):
    if not all(len(x) == len(values) for values in label_to_values.values()):
        raise ValueError('values of label_to_values must have length len(x)')
    colors = ['b','g','r','c','m','y','k']
    line_styles = ['-','--',':']
    for (i, label) in enumerate(sorted(label_to_values.keys())):
        color = colors[i%len(colors)]
        line_style = line_styles[(i//len(colors))%len(line_styles)]
        plt.plot(x,
                 label_to_values[label],
                 label=label,
                 color=color,
                 linestyle=line_style)
    plt.legend(loc='center left', bbox_to_anchor=(1,0.5), prop={'size':9})
    plt.tight_layout(pad=9)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()

# x_min, x_max for example proportion_initiated_by_user
Project: facebook-message-analysis    Author: szheng17    | Project source | File source
def plot_histogram(x, n_bins, title, x_label, y_label):
    """
    Plots a histogram from a list of data.

    Args:
        x: A list of floats representing the data.
        n_bins: An int representing the number of bins to plot.
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.hist(x, bins=n_bins)
    plt.show()

    # probability
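A minimal usage sketch for plot_histogram above, using randomly generated values purely for illustration.

import numpy as np

samples = np.random.normal(loc=0.0, scale=1.0, size=1000).tolist()  # hypothetical data
plot_histogram(samples, n_bins=30, title='Sample distribution',
               x_label='Value', y_label='Frequency')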
Project: sampleRNN_ICLR2017    Author: soroushmehr    | Project source | File source
def plot_traing_info(x, ylist, path):
    """
    Loads the log file and plots the x and y values provided as input.
    Saves as <path>/train_log.png
    """
    file_name = os.path.join(path, __train_log_file_name)
    try:
        with open(file_name, "rb") as f:
            log = pickle.load(f)
    except IOError:  # first time
        warnings.warn("There is no {} file here!!!".format(file_name))
        return
    plt.figure()
    x_vals = log[x]
    for y in ylist:
        y_vals = log[y]
        if len(y_vals) != len(x_vals):
            warnings.warn("One of y's: {} does not have the same length as x: {}".format(y, x))
        plt.plot(x_vals, y_vals, label=y)
        # assert len(y_vals) == len(x_vals), "not the same len"
    plt.xlabel(x)
    plt.legend()
    #plt.show()
    plt.savefig(file_name[:-3]+'png', bbox_inches='tight')
    plt.close('all')
Project: pybot    Author: spillai    | Project source | File source
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    # Create (or reuse) and clear the figure before drawing, so the image is not wiped afterwards
    f = plt.figure(1)
    f.clf()

    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
Project: pybot    Author: spillai    | Project source | File source
def plot_confusion_matrix(cm, clf_target_names, title='Confusion matrix', cmap=plt.cm.jet):
    target_names = [key.replace('_', '-') for key in clf_target_names]

    for idx in range(len(cm)): 
        cm[idx,:] = (cm[idx,:] * 100.0 / np.sum(cm[idx,:])).astype(int)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.matshow(cm)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(clf_target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    # plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Project: pybot    Author: spillai    | Project source | File source
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=True)
Project: chash    Author: luhsra    | Project source | File source
def plot_build_time_composition_graph(parseTimes, hashTimes, compileTimes, diffToBuildTime): # times in s
    fig, ax = plt.subplots()

    ax.stackplot(np.arange(1, len(parseTimes)+1), # x axis
#                 [parseTimes, hashTimes, compileTimes, diffToBuildTime],
                  [[i/60 for i in parseTimes], [i/60 for i in hashTimes], [i/60 for i in compileTimes], [i/60 for i in diffToBuildTime]],
                 colors=[parseColor,hashColor,compileColor,remainColor], edgecolor='none')
    plt.xlim(1,len(parseTimes))
    plt.xlabel('commits')
    plt.ylabel('time [min]')
    lgd = ax.legend([mpatches.Patch(color=remainColor),
                     mpatches.Patch(color=compileColor),
                     mpatches.Patch(color=hashColor),
                     mpatches.Patch(color=parseColor)],
                    ['remaining build time','compile time', 'hash time', 'parse time'],
                    loc='center left', bbox_to_anchor=(1, 0.5))
    fig.savefig(abs_path(BUILD_TIME_COMPOSITION_FILENAME), bbox_extra_artists=(lgd,), bbox_inches='tight')
    print_avg(parseTimes, 'parse')
    print_avg(hashTimes, 'hash')
    print_avg(compileTimes, 'compile')
    print_avg(diffToBuildTime, 'remainder')
Project: chash    Author: luhsra    | Project source | File source
def plotTimeMultiHistogram(parseTimes, hashTimes, compileTimes, filename): # times in ms
    bins = np.linspace(0, 5000, 50)
    data = np.vstack([parseTimes, hashTimes, compileTimes]).T
    fig, ax = plt.subplots()
    plt.hist(data, bins, alpha=0.7, label=['parsing', 'hashing', 'compiling'], color=[parseColor, hashColor, compileColor])
    plt.legend(loc='upper right')
    plt.xlabel('time [ms]')
    plt.ylabel('#files')
    fig.savefig(filename)

    fig, ax = plt.subplots()
    boxplot_data = [[i/1000 for i in parseTimes], [i/1000 for i in hashTimes], [i/1000 for i in compileTimes]] # times to s
    plt.boxplot(boxplot_data, 0, 'rs', 0, [5, 95])
    plt.xlabel('time [s]')
    plt.yticks([1, 2, 3], ['parsing', 'hashing', 'compiling'])
    #lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # legend on the right
    fig.savefig(filename[:-4] + '_boxplots' + GRAPH_EXTENSION)
Project: chash    Author: luhsra    | Project source | File source
def plot_build_time_composition_graph(parse_times, hash_times, compile_times, diff_to_build_time): # times in ns
    fig, ax = plt.subplots()
#[i/1e6 for i in parse_times],
    ax.stackplot(np.arange(1, len(parse_times)+1), # x axis
                 [[i/1e6 for i in parse_times], [i/1e6 for i in hash_times],[i/1e6 for i in compile_times], # ns to ms
                #diff_to_build_time
                ], colors=[parse_color,hash_color,compile_color,
                 #   remain_color
                ], edgecolor='none')
    plt.xlim(1,len(parse_times))
    plt.xlabel('commits')
    plt.ylabel('time [ms]')
    ax.set_yscale('log')
    lgd = ax.legend([#mpatches.Patch(color=remain_color),
                     mpatches.Patch(color=compile_color),
                     mpatches.Patch(color=hash_color),
                     mpatches.Patch(color=parse_color)],
                    [#'remaining build time',
                    'compile time', 'hash time', 'parse time'],
                    loc='center left', bbox_to_anchor=(1, 0.5))
    fig.savefig(abs_path(BUILD_TIME_FILENAME), bbox_extra_artists=(lgd,), bbox_inches='tight')



################################################################################
Project: snake    Author: rhinech    | Project source | File source
def load_data():
    """Draw the Mott lobes."""

    res = np.load(r'data_%d.npy' % GRID_SIZE)
    x = res[:, 0]
    y = res[:, 1]
    z = []
    for i, entry in enumerate(res):
        z.append(kinetic_energy(entry[2:], -1.))
    plt.pcolor(
        np.reshape(x, (GRID_SIZE, GRID_SIZE)),
        np.reshape(y, (GRID_SIZE, GRID_SIZE)),
        np.reshape(z, (GRID_SIZE, GRID_SIZE))
    )
    plt.xlabel('$dt/U$')
    plt.ylabel(r'$\mu/U$')
    plt.show()
Project: saapy    Author: ashapochka    | Project source | File source
def plot_ecdf(x, y, xlabel='attribute', legend='x'):
    """
    Plot distribution ECDF
    x should be sorted, y typically from 1/len(x) to 1

    TODO: function should be improved to plot multiple overlayed ecdfs
    """
    plt.plot(x, y, marker='.', linestyle='none')

    # Make nice margins
    plt.margins(0.02)

    # Annotate the plot
    plt.legend((legend,), loc='lower right')
    _ = plt.xlabel(xlabel)
    _ = plt.ylabel('ECDF')

    # Display the plot
    plt.show()
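The docstring above expects x to be sorted and y to run from 1/len(x) to 1; a minimal sketch of preparing those inputs (with made-up data) could look like this.

import numpy as np

data = np.random.exponential(scale=2.0, size=500)  # hypothetical attribute values
x = np.sort(data)
y = np.arange(1, len(x) + 1) / float(len(x))       # ECDF heights from 1/len(x) to 1
plot_ecdf(x, y, xlabel='response time [s]', legend='service A')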
Project: CausalGAN    Author: mkocaoglu    | Project source | File source
def scatter2d(x,y,title='2dscatterplot',xlabel=None,ylabel=None):
    fig=plt.figure()
    plt.scatter(x,y)
    plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)

    if not 0 <= np.min(x) <= np.max(x) <= 1:
        raise ValueError('summary_scatter2d title: {} input x exceeded [0,1] range. '
                         'min: {} max: {}'.format(title, np.min(x), np.max(x)))
    if not 0 <= np.min(y) <= np.max(y) <= 1:
        raise ValueError('summary_scatter2d title: {} input y exceeded [0,1] range. '
                         'min: {} max: {}'.format(title, np.min(y), np.max(y)))

    plt.xlim([0,1])
    plt.ylim([0,1])
    return fig
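A minimal usage sketch for scatter2d; the function requires both inputs to lie in [0, 1], so the illustrative data below is drawn from that range.

import numpy as np

x = np.random.uniform(0.0, 1.0, 200)  # hypothetical values already scaled to [0, 1]
y = np.random.uniform(0.0, 1.0, 200)
fig = scatter2d(x, y, title='latent_codes', xlabel='dim 0', ylabel='dim 1')
fig.savefig('latent_codes_scatter.png')  # hypothetical output filename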
Project: mx-lsoftmax    Author: luoyetx    | Project source | File source
def plot_beta():
    '''plot beta over training
    '''
    beta = args.beta
    scale = args.scale
    beta_min = args.beta_min
    num_epoch = args.num_epoch
    epoch_size = int(float(args.num_examples) / args.batch_size)

    x = np.arange(num_epoch*epoch_size)
    y = beta * np.power(scale, x)
    y = np.maximum(y, beta_min)
    epoch_x = np.arange(num_epoch) * epoch_size
    epoch_y = beta * np.power(scale, epoch_x)
    epoch_y = np.maximum(epoch_y, beta_min)

    # plot beta descent curve
    plt.semilogy(x, y)
    plt.semilogy(epoch_x, epoch_y, 'ro')
    plt.title('beta descent')
    plt.ylabel('beta')
    plt.xlabel('epoch')
    plt.show()
Project: machine-learning    Author: zzw0929    | Project source | File source
def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat, labelMat =  loadDataSet()
    dataArr =  array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1]);ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1]);ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0]-weights[1]*x)/weights[2]  # decision boundary: w0 + w1*x1 + w2*x2 = 0
    ax.plot(x, y)
    plt.xlabel('X1');plt.ylabel('X2')
    plt.show()

Project: code-uai16    Author: thanhan    | Project source | File source
def plot_gold(g1, g2, lc, p = 0):
    """
    plot sen/spe of g1 against g2
    only consider workers in lc
    """

    mv = crowd_model.mv_model(lc)
    s1 = []; s2 = []

    for w in g1.keys():
        if w in g2 and g1[w][p] != None and g2[w][p] != None and w in mv.dic_ss:
            s1.append(g1[w][p])
            s2.append(g2[w][p])

    plt.xticks((0, 0.5, 1), ("0", "0.5", "1"))
    plt.tick_params(labelsize = 25)
    plt.yticks((0, 0.5, 1), ("0", "0.5", "1"))

    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.scatter(s1, s2, marker = '.', s=50, c = 'black')

    plt.xlabel('task 1 sen.', fontsize = 25)
    plt.ylabel('task 2 sen.', fontsize = 25)
Project: code-uai16    Author: thanhan    | Project source | File source
def plot_multi_err():
    """
    """
    f = open('gzoo1000000_1_2_0.2_pickle.pkl')
    res = pickle.load(f)
    sing = res[(0.5, 'single')]
    multi = res[(0.5, 'multi')]
    (g1, g2, g3, g4) = load_gold()

    a = []; b = []
    for w in multi:
        a.append(abs(g2[w][0]- sing[w][0])); b.append(abs(g2[w][0] - multi[w][0]))


    plt.xlim(0,1); plt.ylim(0,1)
    plt.scatter(a, b, marker = '.')
    plt.plot([0, 1], [0, 1], ls="-", c=".5")

    plt.xlabel('single')
    plt.ylabel('multi')
Project: code-uai16    Author: thanhan    | Project source | File source
def plot_gold(gold):
    #plt.xlim([0.2,1])
    #plt.ylim([0.7,1])
    x = []
    y = []
    for (wid,(sen, spe, n)) in gold.items():
      if wid.startswith('S'):
        x.append(sen)
        y.append(spe)
    plt.scatter(x,y, c = 'r', marker = 'o', label = 'Novice')

    x = []; y = []
    for (wid,(sen, spe, n)) in gold.items():
      if wid.startswith('E'):
        x.append(sen)
        y.append(spe)
    plt.scatter(x,y, c = 'b', marker = 'x', label = 'Expert')

    plt.legend(loc = 'lower left')
    plt.xlabel("Sensitivity")
    plt.ylabel("Specificity")
Project: PersonalizedMultitaskLearning    Author: mitmedialab    | Project source | File source
def plotValResults(self, save_path=None, label=None):
        if label is not None:
            accs = self.training_val_results['acc'][label]
            aucs = self.training_val_results['auc'][label]
        else:
            accs = self.training_val_results['acc']
            aucs = self.training_val_results['auc']
        plt.figure()
        plt.plot([i * ACCURACY_LOGGED_EVERY_N_STEPS for i in range(len(accs))], accs)
        plt.plot([i * ACCURACY_LOGGED_EVERY_N_STEPS for i in range(len(aucs))], aucs)
        plt.xlabel('Training step')
        plt.ylabel('Validation accuracy')
        plt.legend(['Accuracy','AUC'])
        if save_path is None:
            plt.show()
        else:
            plt.savefig(save_path)
        plt.close()
Project: PersonalizedMultitaskLearning    Author: mitmedialab    | Project source | File source
def plotValResults(self, save_path=None, label=None):
        if label:
            accs = self.training_val_results_per_task['acc'][label]
            aucs = self.training_val_results_per_task['auc'][label]
        else:
            accs = self.training_val_results['acc']
            aucs = self.training_val_results['auc']
        plt.figure()
        plt.plot([i * self.accuracy_logged_every_n for i in range(len(accs))], accs)
        plt.plot([i * self.accuracy_logged_every_n for i in range(len(aucs))], aucs)
        plt.xlabel('Training step')
        plt.ylabel('Validation accuracy')
        plt.legend(['Accuracy','AUC'])
        if save_path is None:
            plt.show()
        else:
            plt.savefig(save_path)
Project: GANGogh    Author: rkjones4    | Project source | File source
def flush():
    prints = []

    for name, vals in _since_last_flush.items():
        prints.append("{}\t{}".format(name, np.mean(list(vals.values()))))
        _since_beginning[name].update(vals)

        x_vals = np.sort(list(_since_beginning[name].keys()))
        y_vals = [_since_beginning[name][x] for x in x_vals]

        plt.clf()
        plt.plot(x_vals, y_vals)
        plt.xlabel('iteration')
        plt.ylabel(name)
        plt.savefig('generated/'+name.replace(' ', '_')+'.jpg')

    print("iter {}\t{}".format(_iter[0], "\t".join(prints)))
    _since_last_flush.clear()

    with open('log.pkl', 'wb') as f:
        pickle.dump(dict(_since_beginning), f, 4)
Project: bob.bio.base    Author: bioidiap    | Project source | File source
def _plot_cmc(cmcs, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'lower right'
  # open new page for current plot
  figure = pyplot.figure()

  max_R = 0
  # plot the CMC curves
  for i in range(len(cmcs)):
    probs = bob.measure.cmc(cmcs[i])
    R = len(probs)
    pyplot.semilogx(range(1, R+1), probs, figure=figure, color=colors[i], label=labels[i])
    max_R = max(R, max_R)

  # change axes accordingly
  ticks = [int(t) for t in pyplot.xticks()[0]]
  pyplot.xlabel('Rank')
  pyplot.ylabel('Probability')
  pyplot.xticks(ticks, [str(t) for t in ticks])
  pyplot.axis([0, max_R, -0.01, 1.01])
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: bob.bio.base    Author: bioidiap    | Project source | File source
def _plot_epc(scores_dev, scores_eval, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'upper center'
  # open new page for current plot
  figure = pyplot.figure()

  # plot the DET curves
  for i in range(len(scores_dev)):
    x,y = bob.measure.epc(scores_dev[i][0], scores_dev[i][1], scores_eval[i][0], scores_eval[i][1], 100)
    pyplot.plot(x, y, color=colors[i], label=labels[i])

  # change axes accordingly
  pyplot.xlabel('alpha')
  pyplot.ylabel('HTER')
  pyplot.title(title)
  pyplot.axis([-0.01, 1.01, -0.01, 0.51])
  pyplot.grid(True)
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: Flavor-Network    Author: lingcheng99    | Project source | File source
def plot_confusion_matrix(cm, col, title, cmap=plt.cm.viridis):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    for i in range(cm.shape[0]):
        plt.annotate("%.2f" %cm[i][i],xy=(i,i),
                    horizontalalignment='center',
                    verticalalignment='center')
    plt.title(title,fontsize=18)
    plt.colorbar(fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(col.unique()))
    plt.xticks(tick_marks, sorted(col.unique()),rotation=90)
    plt.yticks(tick_marks, sorted(col.unique()))
    plt.tight_layout()
    plt.ylabel('True label',fontsize=18)
    plt.xlabel('Predicted label',fontsize=18)

#using flavor network to project recipes from ingredient matrix to flavor matrix
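A minimal usage sketch for the confusion-matrix plot above, assuming pandas, scikit-learn, and the function itself are importable; the labels below are hypothetical.

import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = pd.Series(['sweet', 'sour', 'sweet', 'salty', 'sour', 'sweet'])  # hypothetical labels
y_pred = ['sweet', 'sweet', 'sweet', 'salty', 'sour', 'sour']
cm = confusion_matrix(y_true, y_pred, labels=sorted(y_true.unique()))
plot_confusion_matrix(cm, col=y_true, title='Cuisine confusion')
plt.show()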
Project: AnomalyDetection    Author: JayZhuCoding    | Project source | File source
def plot_training_parameters(self):
        fr = open("training_param.csv", "r")
        fr.readline()
        lines = fr.readlines()
        fr.close()
        n = 100
        nu = np.empty(n, dtype=np.float64)
        gamma = np.empty(n, dtype=np.float64)
        diff = np.empty([n, n], dtype=np.float64)
        for row in range(len(lines)):
            m = lines[row].strip().split(",")
            i = row // n  # integer division so the index stays an int
            j = row % n
            nu[i] = Decimal(m[0])
            gamma[j] = Decimal(m[1])
            diff[i][j] = Decimal(m[2])
        plt.pcolor(gamma, nu, diff, cmap="coolwarm")
        plt.title("The Difference of Guassian Classifier with Different nu, gamma")
        plt.xlabel("gamma")
        plt.ylabel("nu")
        plt.xscale("log")
        plt.yscale("log")
        plt.colorbar()
        plt.show()
Project: MicroGrids    Author: squoilin    | Project source | File source
def Energy_Flow(Time_Series):


    Energy_Flow = {'Energy_Demand':0, 'Lost Load':0, 'Energy PV':0,'Curtailment':0, 'Energy Diesel':0, 'Discharge energy from the Battery':0, 'Charge energy to the Battery':0}

    for v in Energy_Flow.keys():
        if v == 'Energy PV':
            Energy_Flow[v] = round((Time_Series[v].sum() - Time_Series['Curtailment'].sum()- Time_Series['Charge energy to the Battery'].sum())/1000000, 2)
        else:
            Energy_Flow[v] = round((Time_Series[v].sum())/1000000, 2)


    c = ['From Generator', 'To Battery', 'Demand', 'From PV', 'From Battery', 'Curtailment', 'Lost Load']       
    plt.figure()    
    plt.bar((1,2,3,4,5,6,7), Energy_Flow.values(), color= 'b', alpha=0.3, align='center')

    plt.xticks((1.2,2.2,3.2,4.2,5.2,6.2,7.2), c)
    plt.xlabel('Technology')
    plt.ylabel('Energy Flow (MWh)')
    plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='on')
    plt.xticks(rotation=-30)
    plt.savefig('Results/Energy_Flow.png', bbox_inches='tight')
    plt.show()    

    return Energy_Flow
Project: Machine-Learning    Author: grasses    | Project source | File source
def show(self):
        keys = []
        values = []
        for (k, v) in self.letter_db.iteritems():
            total = v['total']
            right = v['right']
            keys.append(k)
            values.append(100 * float(right / float(total)))

        groups = len(self.letter_db)
        index = np.arange(groups)
        width = 0.5
        opacity = 0.4

        plt.bar(index, values, linewidth = width, alpha = opacity, color = 'b', label = 'right rate')

        plt.xlabel('letter')
        plt.ylabel('predict right rate (%)')
        plt.title('Writer identify: letter right rate')
        plt.xticks(index + width, keys)
        plt.ylim(0, 100)
        plt.legend()
        plt.show()
Project: KATE    Author: hugochan    | Project source | File source
def plot_info_retrieval(precisions, save_file):
    # markers = ["|", "D", "8", "v", "^", ">", "h", "H", "s", "*", "p", "d", "<"]
    markers = ["D", "p", 's', "*", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    ticks = zip(*zip(*precisions)[1][0])[0]
    plt.xticks(range(len(ticks)), ticks)
    new_x = interpolate.interp1d(ticks, range(len(ticks)))(ticks)

    i = 0
    for model_name, val in precisions:
        fr, pr = zip(*val)
        plt.plot(new_x, pr, linestyle='-', alpha=0.7, marker=markers[i],
                        markersize=8, label=model_name)
        i += 1
        # plt.legend(model_name)
    plt.xlabel('Fraction of Retrieved Documents')
    plt.ylabel('Precision')
    legend = plt.legend(loc='upper right', shadow=True)
    plt.savefig(save_file)
    plt.show()
Project: KATE    Author: hugochan    | Project source | File source
def plot_info_retrieval_by_length(precisions, save_file):
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "^", "x", "D"]
    ticks = zip(*zip(*precisions)[1][0])[0]
    plt.xticks(range(len(ticks)), ticks)
    new_x = interpolate.interp1d(ticks, range(len(ticks)))(ticks)

    i = 0
    for model_name, val in precisions:
        fr, pr = zip(*val)
        plt.plot(new_x, pr, linestyle='-', alpha=0.6, marker=markers[i],
                        markersize=6, label=model_name)
        i += 1
        # plt.legend(model_name)
    plt.xlabel('Document Sorted by Length')
    plt.ylabel('Precision (%)')
    legend = plt.legend(loc='upper right', shadow=True)
    plt.savefig(save_file)
    plt.show()
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def plot_cdf_model_and_meansh(self, cdfs, tag, cdf0_1s, aucs, bx, dx):
        plt.close("all")
        x = np.arange(0, bx, dx)
        fig, ax = plt.subplots(nrows=1, ncols=1)
        ax.plot(x, cdfs[0], label="CDF model")
        ax.plot(x, cdfs[1], label="CDF mean shape")
        ax.grid(True)
        plt.xlabel("NRMSE")
        plt.ylabel("Data proportion")
        plt.legend(loc=4, prop={'size': 8}, fancybox=True, shadow=True)
        plt.title(
            "CDF curve: " + tag + ". Model: CDF0.1: " +
            str(prec2 % cdf0_1s[0]) + " . AUC:" + str(prec2 % aucs[0]) +
            ".\n" + ". MSh: CDF0.1: " +
            str(prec2 % cdf0_1s[1]) + " . AUC:" + str(prec2 % aucs[1]) + ".\n")
        return fig
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def plot_errors(self, valid, train, path, epoc=-1):
        '''Plot and save the figure of the error over the valid and train sets calculated during the gradient descent. ***** CLASSIFICATION

        The figure won't be displayed.  
        valid: list of errors over the validation set
        train: list of errors over the train set
        path: path where to save the figure.
        '''
        fig = plt.figure()
        train_gp, = plt.plot(train, '-r')
        valid_gp, = plt.plot(valid, '-*g')
        if epoc >= 0:
            epoc = epoc - 1  # plotting starts from 0
            stop, = plt.plot([epoc, epoc], [0, max(valid + train) + 5], '--b', lw=2)
            plt.legend([train_gp, valid_gp, stop], ['train error', 'valid error', 'stop learning, epoch='+str(epoc + 1)], fancybox=True, shadow=True)
        else:
            plt.legend([train_gp, valid_gp], ['train error', 'valid error'], fancybox=True, shadow=True)

        plt.title('Train/valid error during the gradient descent')
        plt.xlabel(u"n° epoch")
        plt.ylabel('Error (100 - accuracy) %')
        fig.savefig(path, bbox_inches='tight')
        # to display the figure
        #plt.show()
Project: nn4nlp-code    Author: neubig    | Project source | File source
def plot_histogram(counter, label, plot=None):
    import matplotlib.pyplot as plt
    plt.figure()
    nums = list(counter.keys())
    counts = list(counter.values())
    indices = range(len(counts))
    bars = plt.bar(indices, counts, align="center")
    plt.xticks(indices, nums)
    top = 1.06 * max(counts)
    plt.ylim(min(counts), top)
    plt.xlabel("number of %s" % label)
    plt.ylabel("count")
    for bar in bars:
        count = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., count, "%.1f%%" % (100.0 * count / sum(counts)),
                 ha="center", va="bottom")
    if plot:
        plt.savefig(plot + "histogram_" + label + ".png")
    else:
        plt.show()
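A minimal usage sketch for plot_histogram above; the Counter contents are made up, and passing a directory-style prefix via plot saves the figure instead of showing it.

from collections import Counter

sentence_lengths = Counter({5: 120, 10: 340, 15: 210, 20: 80})  # hypothetical counts
plot_histogram(sentence_lengths, label='tokens per sentence')   # interactive display
# plot_histogram(sentence_lengths, label='tokens per sentence', plot='figs/')  # would save figs/histogram_...png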
Project: easyML    Author: aarshayj    | Project source | File source
def algo_specific_fit(self, printTopN):
        # print Feature Importance Scores table
        self.feature_imp = pd.Series(
                            self.alg.feature_importances_, 
                            index=self.predictors
                            ).sort_values(ascending=False)

        self.plot_feature_importance(printTopN)

        self.model_output['Feature_Importance'] = \
                                        self.feature_imp.to_string()

        #Plot OOB estimates if subsample <1:
        if self.model_output['subsample']<1:
            plt.xlabel("GBM Iteration")
            plt.ylabel("Score")
            plt.plot(
                range(1, self.model_output['n_estimators']+1), 
                self.alg.oob_improvement_
                )
            plt.legend(['oob_improvement_','train_score_'], loc='upper left')
            plt.show(block=False)
Project: kmeans-service    Author: MAYHEM-Lab    | Project source | File source
def plot_spatial_cluster_fig(data, covar_type_tied_labels_k):
    """ Creates a 3x2 plot spatial plot using labels as the color """
    sns.set(context='talk', style='white')
    data.columns = [c.lower() for c in data.columns]
    fig = plt.figure()
    placement = {'full': {True: 1, False: 4}, 'diag': {True: 2, False: 5}, 'spher': {True: 3, False: 6}}

    lim_left = data['longitude'].min()
    lim_right = data['longitude'].max()
    lim_bottom = data['latitude'].min()
    lim_top = data['latitude'].max()
    for covar_type, covar_tied, labels, k in covar_type_tied_labels_k:
        plt.subplot(2, 3, placement[covar_type][covar_tied])
        plt.scatter(data['longitude'], data['latitude'], c=labels, cmap=plt.cm.rainbow, s=10)
        plt.xlim(left=lim_left, right=lim_right)
        plt.ylim(bottom=lim_bottom, top=lim_top)
        plt.xticks([])
        plt.yticks([])
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.title('{}-{}, K={}'.format(covar_type.capitalize(), ['Untied', 'Tied'][covar_tied], k))
    plt.tight_layout()
    return fig
Project: autonomio    Author: autonomio    | Project source | File source
def paramagg(data):

    '''
    USE: paramagg(df)

    Provides an overview in one plot for a parameter scan. Useful
    for understanding the rough distribution of accuracy and loss for both
    test and train.

    data = a pandas dataframe from hyperscan()
    '''

    plt.figure(num=None, figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')

    plt.scatter(data.train_loss, data.train_acc, label='train')
    plt.scatter(data.test_loss, data.test_acc, label='test')

    plt.legend(loc='upper right')
    plt.tick_params(axis='both', which='major', pad=15)

    plt.xlabel('loss', fontsize=18, labelpad=15, color="gray")
    plt.ylabel('accuracy', fontsize=18, labelpad=15, color="gray")

    plt.show()
Project: Twitter-and-IMDB-Sentimental-Analytics    Author: abhinandanramesh    | Project source | File source
def make_plot(counts):
    """
    Plot the counts for the positive and negative words for each timestep.
    Use plt.show() so that the plot will pop up.
    """
    positive = []
    negative = []

    for count in counts:
        for word in count:
            if word[0] == "positive":
                positive.append(word[1])
            else:
                negative.append(word[1])

    plt.axis([-1, len(positive), 0, max(max(positive),max(negative))+100])
    pos, = plt.plot(positive, 'b-', marker = 'o', markersize = 10)
    neg, = plt.plot(negative, 'g-', marker = 'o', markersize = 10)
    plt.legend((pos,neg),('Positive','Negative'),loc=2)
    plt.xticks(np.arange(0, len(positive), 1))
    plt.xlabel("Time Step")
    plt.ylabel("Word Count")
    plt.show()
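The expected structure of counts, a list with one [(word, count), ...] list per time step, is easiest to see from a minimal, made-up call.

counts = [[("positive", 10), ("negative", 4)],   # hypothetical time step 1
          [("positive", 17), ("negative", 9)]]   # hypothetical time step 2
make_plot(counts)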
Project: OpenAPS    Author: medicinexlab    | Project source | File source
def _plot_old_pred_data(old_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, algorithm_str, minutes_str):
    actual_bg_array = old_pred_data.result_actual_bg_array
    actual_bg_time_array = old_pred_data.result_actual_bg_time_array
    pred_array = old_pred_data.result_pred_array
    pred_time_array = old_pred_data.result_pred_time_array

    #Root mean squared error
    rms = math.sqrt(metrics.mean_squared_error(actual_bg_array, pred_array))
    print "                Root Mean Squared Error: " + str(rms)
    print "                Mean Absolute Error: " + str(metrics.mean_absolute_error(actual_bg_array, pred_array))
    print "                R^2 Coefficient of Determination: " + str(metrics.r2_score(actual_bg_array, pred_array))

    plot, zone = ClarkeErrorGrid.clarke_error_grid(actual_bg_array, pred_array, id_str + " " + algorithm_str + " " + minutes_str)
    print "                Percent A:{}".format(float(zone[0]) / (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Percent C, D, E:{}".format(float(zone[2] + zone[3] + zone[4])/ (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
    print "                Zones are A:{}, B:{}, C:{}, D:{}, E:{}\n".format(zone[0],zone[1],zone[2],zone[3],zone[4])
    if save_clarke_plot: plt.savefig(id_str + algorithm_str.replace(" ", "") + minutes_str + "clarke.png")
    if show_clarke_plot: plot.show()

    plt.clf()
    plt.plot(actual_bg_time_array, actual_bg_array, label="Actual BG", color='black', linestyle='-')
    plt.plot(pred_time_array, pred_array, label="BG Prediction", color='black', linestyle=':')
    plt.title(id_str + " " + algorithm_str + " " + minutes_str + " BG Analysis")
    plt.ylabel("Blood Glucose Level (mg/dl)")
    plt.xlabel("Time (minutes)")
    plt.legend(loc='upper left')

    # SHOW/SAVE PLOT DEPENDING ON THE BOOLEAN PARAMETER
    if save_pred_plot: plt.savefig(id_str + algorithm_str.replace(" ","") + minutes_str + "plot.png")
    if show_pred_plot: plt.show()


#Function to analyze the old OpenAPS data
Project: LinearCorex    Author: gregversteeg    | Project source | File source
def plot_convergence(history, prefix='', prefix2=''):
    plt.figure(figsize=(8, 5))
    ax = plt.subplot(111)

    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    plt.plot(history["TC"], '-', lw=2.5, color=tableau20[0])
    x = len(history["TC"])
    y = np.max(history["TC"])
    plt.text(0.5 * x, 0.8 * y, "TC", fontsize=18, fontweight='bold', color=tableau20[0])

    if "additivity" in history:
        plt.plot(history["additivity"], '-', lw=2.5, color=tableau20[1])
        plt.text(0.5 * x, 0.3 * y, "additivity", fontsize=18, fontweight='bold', color=tableau20[1])

    plt.ylabel('TC', fontsize=12, fontweight='bold')
    plt.xlabel('# Iterations', fontsize=12, fontweight='bold')
    plt.suptitle('Convergence', fontsize=12)
    filename = '{}/summary/convergence{}.pdf'.format(prefix, prefix2)
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    plt.savefig(filename, bbox_inches="tight")
    plt.close('all')
    return True
Project: stock-eagle    Author: mtusman    | Project source | File source
def get_stock(symbol):
    last_year_date = datetime.strftime(datetime.now() - relativedelta(years=1), "%Y-%m-%d")
    date = get_last_trading_date()
    url = requests.get('https://www.quandl.com/api/v3/datasets/WIKI/{}.json?start_date={}&end_date={}'.format(symbol, last_year_date, date))
    json_dataset = url.json()
    json_data = json_dataset['dataset']['data']
    dates = []  
    closing = []
    for day in json_data:
        dates.append(datetime.strptime(day[0], "%Y-%m-%d"))
        closing.append(day[4])
    plt.plot_date(dates, closing, '-')
    plt.title(symbol)
    plt.xlabel('Date')
    plt.ylabel('Stock Price')
    plt.savefig('foo.png')
Project: face_detection    Author: chintak    | Project source | File source
def plot_learning_curve(_, history, folder, debug=True):
    arr = np.asarray(
        [[k['epoch'], k['train_loss'], k['valid_loss']] for k in history])
    plt.figure()
    plt.plot(arr[:, 0], arr[:, 1], 'r', marker='o',
             label='Training loss', linewidth=2.0)
    plt.plot(arr[:, 0], arr[:, 2], 'b', marker='o',
             label='Validation loss', linewidth=2.0)
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.ylim([0.0, np.max(arr[:, 1:]) * 1.3])
    plt.title('Learning curve')
    plt.legend()
    if not debug:
        plt.savefig('%s/learning_curve.png' % folder, bbox_inches='tight')
        plt.close()
Project: ndparse    Author: neurodata    | Project source | File source
def display_pr_curve(precision, recall):
    # following examples from sklearn

    # TODO:  f1 operating point

    import pylab as plt
    # Plot Precision-Recall curve
    plt.clf()
    plt.plot(recall, precision, label='Precision-Recall curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    # NOTE: max_f1 is not defined in this snippet; it must come from the enclosing scope (see TODO above)
    plt.title('Precision-Recall example: Max f1={0:0.2f}'.format(max_f1))
    plt.legend(loc="lower left")
    plt.show()
Project: Supply-demand-forecasting    Author: LevinJ    | Project source | File source
def traffic_districution(self):
        data_dir = g_singletonDataFilePath.getTrainDir()
        df = self.load_trafficdf(data_dir)
        print df['traffic'].describe()
#         sns.distplot(self.gapdf['gap'],kde=False, bins=100);
        df['traffic'].plot(kind='hist', bins=100)
        plt.xlabel('Traffic')
        plt.title('Histogram of Traffic')

        return
#     def disp_gap_bydistrict(self, disp_ids = np.arange(34,67,1), cls1 = 'start_district_id', cls2 = 'time_id'):
# #         disp_ids = np.arange(1,34,1)
#         plt.figure()
#         by_district = self.gapdf.groupby(cls1)
#         size = len(disp_ids)
# #         size = len(by_district)
#         col_len = row_len = math.ceil(math.sqrt(size))
#         count = 1
#         for name, group in by_district:
#             if not name in disp_ids:
#                 continue
#             plt.subplot(row_len, col_len, count)
#             group.groupby(cls2)['gap'].mean().plot()
#             count += 1   
#         return
Project: Supply-demand-forecasting    Author: LevinJ    | Project source | File source
def disp_gap_byweather(self):
        df = self.gapdf
        data_dir = g_singletonDataFilePath.getTrainDir()
        dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_prevweather.df.pickle'
        dumpload = DumpLoad(dumpfile_path)
        if dumpload.isExisiting():
            temp_df = dumpload.load()
        else:
            weather_dict = self.get_weather_dict(data_dir)

            temp_df = self.X_y_Df['time_slotid'].apply(self.find_prev_weather_mode, weather_dict=weather_dict)     
            dumpload.dump(temp_df)

        df = pd.concat([df, temp_df],  axis=1)

        gaps_mean = df.groupby('preweather')['gap'].mean()
        gaps_mean.plot(kind='bar')
        plt.ylabel('Mean of gap')
        plt.xlabel('Weather')
        plt.title('Weather/Gap Correlation')
        return
Project: deep-summarization    Author: harpribot    | Project source | File source
def plot_one_metric(self, models_metric, title):
        """

        :param models_metric:
        :param title:
        :return:
        """
        for index, model_metric in enumerate(models_metric):
            plt.plot(self.steps, model_metric, label=self.file_desc[index])
        plt.title(title)
        plt.legend()
        plt.xlabel('Number of batches')
        plt.ylabel('Score')
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def plot_trajectories(src_sent, src_encoding, idx):

    # encoding is (time_steps, hidden_dim)
    #pca = PCA(n_components=1)

    #pca_result = pca.fit_transform(src_encoding)
    times = np.arange(src_encoding.shape[0])
    plt.plot(times, src_encoding)
    plt.title(" ".join(src_sent))
    plt.xlabel('timestep')
    plt.ylabel('trajectories')
    plt.savefig("misc_hidden_cell_trajectories_"+str(idx), bbox_inches="tight")
    plt.close()
Project: pyballd    Author: Yurlungur    | Project source | File source
def plot_test_function(orderx,ordery):
    s = PseudoSpectralDiscretization2D(orderx,XMIN,XMAX,
                                ordery,YMIN,YMAX)
    X,Y = s.get_x2d()
    f_ana = f(X,Y)
    plt.pcolor(X,Y,f_ana)
    plt.xlabel('x',fontsize=16)
    plt.ylabel('y',fontsize=16)
    plt.xlim(XMIN,XMAX)
    plt.ylim(YMIN,YMAX)
    cb = plt.colorbar()
    cb.set_label(label=r'$\cos(x)\sin(2 y)$',fontsize=16)
    for postfix in ['.png','.pdf']:
        name = 'test_function'+postfix
        if USE_FIGS_DIR:
            name = 'figs/' + name
        plt.savefig(name,
                    bbox_inches='tight')
    plt.clf()
Project: pyballd    Author: Yurlungur    | Project source | File source
def test_derivatives():
    orders = [4+(2*i) for i in range(12)]
    errors = [test_derivatives_at_order(o) for o in orders]
    plt.semilogy(orders,errors,'bo-',lw=2,ms=12)
    plt.xlabel('order in y-direction',fontsize=16)
    plt.ylabel(r'$|E|_2$',fontsize=16)
    for postfix in ['.png','.pdf']:
        name = 'orthopoly_errors'+postfix
        if USE_FIGS_DIR:
            name = 'figs/' + name
        plt.savefig(name,
                    bbox_inches='tight')
    plt.clf()