Python matplotlib.pyplot module: xticks() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.xticks().
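
Before the project-level examples, here is a minimal, self-contained sketch (with made-up data and labels) of the most common xticks() patterns: setting explicit tick positions with text labels, reading the current ticks back, and hiding ticks entirely.

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical data: four made-up categories and their bar heights.
labels = ['spring', 'summer', 'autumn', 'winter']
values = [3, 7, 5, 2]

plt.bar(np.arange(len(values)), values, align='center')

# Put one tick under each bar and replace the numeric positions with
# text labels, rotating them so longer labels do not overlap.
plt.xticks(np.arange(len(values)), labels, rotation=45)

# Calling xticks() with no arguments returns the current tick
# locations and their Text label objects.
locations, text_labels = plt.xticks()

# Passing empty lists would remove the x ticks and labels entirely:
# plt.xticks([], [])

plt.tight_layout()
plt.show()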

Project: HousePricePredictionKaggle | Author: Nuwantha
def get_feature_importance(list_of_features):
    n_estimators=10000
    random_state=0
    n_jobs=4
    x_train=data_frame[list_of_features]
    y_train=data_frame.iloc[:,-1]
    feat_labels= data_frame.columns[1:]
    forest = BaggingRegressor(n_estimators=n_estimators,random_state=random_state,n_jobs=n_jobs) 
    forest.fit(x_train,y_train) 
    importances=forest.feature_importances_ 
    indices = np.argsort(importances)[::-1]


    for f in range(x_train.shape[1]):
        print("%2d) %-*s %f" % (f+1,30,feat_labels[indices[f]],
                                        importances[indices[f]]))


    plt.title("Feature Importance")
    plt.bar(range(x_train.shape[1]),importances[indices],color='lightblue',align='center')
    plt.xticks(range(x_train.shape[1]),feat_labels[indices],rotation=90)
    plt.xlim([-1,x_train.shape[1]])
    plt.tight_layout()
    plt.show()
Project: facebook-message-analysis | Author: szheng17
def plot_bar_chart(label_to_value, title, x_label, y_label):
    """
    Plots a bar chart from a dict.

    Args:
        label_to_value: A dict mapping ints or strings to numerical values (int
            or float).
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    n = len(label_to_value)
    labels = sorted(label_to_value.keys())
    values = [label_to_value[label] for label in labels]
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.bar(range(n), values, align='center')
    plt.xticks(range(n), labels, rotation='vertical', fontsize='7')
    plt.gcf().subplots_adjust(bottom=0.2) # make room for x-axis labels
    plt.show()
Project: seq2seq | Author: google
def _create_figure(predictions_dict):
  """Creates and returns a new figure that visualizes
  attention scores for a single model's predictions.
  """

  # Find out how long the predicted sequence is
  target_words = list(predictions_dict["predicted_tokens"])

  prediction_len = _get_prediction_length(predictions_dict)

  # Get source words
  source_len = predictions_dict["features.source_len"]
  source_words = predictions_dict["features.source_tokens"][:source_len]

  # Plot
  fig = plt.figure(figsize=(8, 8))
  plt.imshow(
      X=predictions_dict["attention_scores"][:prediction_len, :source_len],
      interpolation="nearest",
      cmap=plt.cm.Blues)
  plt.xticks(np.arange(source_len), source_words, rotation=45)
  plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
  fig.tight_layout()

  return fig
Project: pybot | Author: spillai
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    f = plt.figure(1)
    f.clf()
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
Project: pybot | Author: spillai
def plot_confusion_matrix(cm, clf_target_names, title='Confusion matrix', cmap=plt.cm.jet):
    target_names = map(lambda key: key.replace('_','-'), clf_target_names)

    for idx in range(len(cm)): 
        cm[idx,:] = (cm[idx,:] * 100.0 / np.sum(cm[idx,:])).astype(np.int)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.matshow(cm)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(clf_target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    # plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Project: pybot | Author: spillai
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=True)
Project: pycma | Author: CMA-ES
def plot_axes_scaling(self, iabscissa=1):
        from matplotlib import pyplot
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
            pyplot.text(0, dat.D[-1, 5],
                        'all axes scaling values equal to %s'
                        % str(dat.D[-1, 5]),
                        verticalalignment='center')
            return self  # nothing interesting to plot
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        # pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principle Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
Project: code-uai16 | Author: thanhan
def plot_gold(g1, g2, lc, p = 0):
    """
    plot sen/spe of g1 against g2
    only consider workers in lc
    """

    mv = crowd_model.mv_model(lc)
    s1 = []; s2 = []

    for w in g1.keys():
        if w in g2 and g1[w][p] != None and g2[w][p] != None and w in mv.dic_ss:
            s1.append(g1[w][p])
            s2.append(g2[w][p])

    plt.xticks((0, 0.5, 1), ("0", "0.5", "1"))
    plt.tick_params(labelsize = 25)
    plt.yticks((0, 0.5, 1), ("0", "0.5", "1"))

    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.scatter(s1, s2, marker = '.', s=50, c = 'black')

    plt.xlabel('task 1 sen.', fontsize = 25)
    plt.ylabel('task 2 sen.', fontsize = 25)
Project: bob.bio.base | Author: bioidiap
def _plot_cmc(cmcs, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'lower right'
  # open new page for current plot
  figure = pyplot.figure()

  max_R = 0
  # plot the CMC curves
  for i in range(len(cmcs)):
    probs = bob.measure.cmc(cmcs[i])
    R = len(probs)
    pyplot.semilogx(range(1, R+1), probs, figure=figure, color=colors[i], label=labels[i])
    max_R = max(R, max_R)

  # change axes accordingly
  ticks = [int(t) for t in pyplot.xticks()[0]]
  pyplot.xlabel('Rank')
  pyplot.ylabel('Probability')
  pyplot.xticks(ticks, [str(t) for t in ticks])
  pyplot.axis([0, max_R, -0.01, 1.01])
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: Flavor-Network | Author: lingcheng99
def plot_confusion_matrix(cm, col, title, cmap=plt.cm.viridis):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    for i in range(cm.shape[0]):
        plt.annotate("%.2f" %cm[i][i],xy=(i,i),
                    horizontalalignment='center',
                    verticalalignment='center')
    plt.title(title,fontsize=18)
    plt.colorbar(fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(col.unique()))
    plt.xticks(tick_marks, sorted(col.unique()),rotation=90)
    plt.yticks(tick_marks, sorted(col.unique()))
    plt.tight_layout()
    plt.ylabel('True label',fontsize=18)
    plt.xlabel('Predicted label',fontsize=18)

#using flavor network to project recipes from ingredient matrix to flavor matrix
Project: BISIP | Author: clberube
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: BISIP | Author: clberube
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: MicroGrids | Author: squoilin
def Energy_Flow(Time_Series):


    Energy_Flow = {'Energy_Demand':0, 'Lost Load':0, 'Energy PV':0,'Curtailment':0, 'Energy Diesel':0, 'Discharge energy from the Battery':0, 'Charge energy to the Battery':0}

    for v in Energy_Flow.keys():
        if v == 'Energy PV':
            Energy_Flow[v] = round((Time_Series[v].sum() - Time_Series['Curtailment'].sum()- Time_Series['Charge energy to the Battery'].sum())/1000000, 2)
        else:
            Energy_Flow[v] = round((Time_Series[v].sum())/1000000, 2)


    c = ['From Generator', 'To Battery', 'Demand', 'From PV', 'From Battery', 'Curtailment', 'Lost Load']       
    plt.figure()    
    plt.bar((1,2,3,4,5,6,7), Energy_Flow.values(), color= 'b', alpha=0.3, align='center')

    plt.xticks((1.2,2.2,3.2,4.2,5.2,6.2,7.2), c)
    plt.xlabel('Technology')
    plt.ylabel('Energy Flow (MWh)')
    plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='on')
    plt.xticks(rotation=-30)
    plt.savefig('Results/Energy_Flow.png', bbox_inches='tight')
    plt.show()    

    return Energy_Flow
Project: MicroGrids | Author: squoilin
def LDR(Time_Series):

    columns=['Consume diesel', 'Lost Load', 'Energy PV','Curtailment','Energy Diesel', 
             'Discharge energy from the Battery', 'Charge energy to the Battery', 
             'Energy_Demand',  'State_Of_Charge_Battery'  ]
    Sort_Values = Time_Series.sort('Energy_Demand', ascending=False)

    index_values = []

    for i in range(len(Time_Series)):
        index_values.append((i+1)/float(len(Time_Series))*100)

    Sort_Values = pd.DataFrame(Sort_Values.values/1000, columns=columns, index=index_values)

    plt.figure() 
    ax = Sort_Values['Energy_Demand'].plot(style='k-',linewidth=1)

    fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
    xticks = mtick.FormatStrFormatter(fmt)
    ax.xaxis.set_major_formatter(xticks)
    ax.set_ylabel('Load (kWh)')
    ax.set_xlabel('Percentage (%)')

    plt.savefig('Results/LDR.png', bbox_inches='tight')
    plt.show()
Project: Machine-Learning | Author: grasses
def show(self):
        keys = []
        values = []
        for (k, v) in self.letter_db.iteritems():
            total = v['total']
            right = v['right']
            keys.append(k)
            values.append(100 * float(right / float(total)))

        groups = len(self.letter_db)
        index = np.arange(groups)
        width = 0.5
        opacity = 0.4

        plt.bar(index, values, linewidth = width, alpha = opacity, color = 'b', label = 'right rate')

        plt.xlabel('letter')
        plt.ylabel('predict right rate (%)')
        plt.title('Writer identify: letter right rate')
        plt.xticks(index + width, keys)
        plt.ylim(0, 100)
        plt.legend()
        plt.show()
Project: KATE | Author: hugochan
def plot_info_retrieval(precisions, save_file):
    # markers = ["|", "D", "8", "v", "^", ">", "h", "H", "s", "*", "p", "d", "<"]
    markers = ["D", "p", 's', "*", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    ticks = zip(*zip(*precisions)[1][0])[0]
    plt.xticks(range(len(ticks)), ticks)
    new_x = interpolate.interp1d(ticks, range(len(ticks)))(ticks)

    i = 0
    for model_name, val in precisions:
        fr, pr = zip(*val)
        plt.plot(new_x, pr, linestyle='-', alpha=0.7, marker=markers[i],
                        markersize=8, label=model_name)
        i += 1
        # plt.legend(model_name)
    plt.xlabel('Fraction of Retrieved Documents')
    plt.ylabel('Precision')
    legend = plt.legend(loc='upper right', shadow=True)
    plt.savefig(save_file)
    plt.show()
Project: KATE | Author: hugochan
def plot_info_retrieval_by_length(precisions, save_file):
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "^", "x", "D"]
    ticks = zip(*zip(*precisions)[1][0])[0]
    plt.xticks(range(len(ticks)), ticks)
    new_x = interpolate.interp1d(ticks, range(len(ticks)))(ticks)

    i = 0
    for model_name, val in precisions:
        fr, pr = zip(*val)
        plt.plot(new_x, pr, linestyle='-', alpha=0.6, marker=markers[i],
                        markersize=6, label=model_name)
        i += 1
        # plt.legend(model_name)
    plt.xlabel('Document Sorted by Length')
    plt.ylabel('Precision (%)')
    legend = plt.legend(loc='upper right', shadow=True)
    plt.savefig(save_file)
    plt.show()
Project: nn4nlp-code | Author: neubig
def plot_histogram(counter, label, plot=None):
    import matplotlib.pyplot as plt
    plt.figure()
    nums = list(counter.keys())
    counts = list(counter.values())
    indices = range(len(counts))
    bars = plt.bar(indices, counts, align="center")
    plt.xticks(indices, nums)
    top = 1.06 * max(counts)
    plt.ylim(min(counts), top)
    plt.xlabel("number of %s" % label)
    plt.ylabel("count")
    for bar in bars:
        count = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., count, "%.1f%%" % (100.0 * count / sum(counts)),
                 ha="center", va="bottom")
    if plot:
        plt.savefig(plot + "histogram_" + label + ".png")
    else:
        plt.show()
Project: kmeans-service | Author: MAYHEM-Lab
def plot_spatial_cluster_fig(data, covar_type_tied_labels_k):
    """ Creates a 3x2 plot spatial plot using labels as the color """
    sns.set(context='talk', style='white')
    data.columns = [c.lower() for c in data.columns]
    fig = plt.figure()
    placement = {'full': {True: 1, False: 4}, 'diag': {True: 2, False: 5}, 'spher': {True: 3, False: 6}}

    lim_left = data['longitude'].min()
    lim_right = data['longitude'].max()
    lim_bottom = data['latitude'].min()
    lim_top = data['latitude'].max()
    for covar_type, covar_tied, labels, k in covar_type_tied_labels_k:
        plt.subplot(2, 3, placement[covar_type][covar_tied])
        plt.scatter(data['longitude'], data['latitude'], c=labels, cmap=plt.cm.rainbow, s=10)
        plt.xlim(left=lim_left, right=lim_right)
        plt.ylim(bottom=lim_bottom, top=lim_top)
        plt.xticks([])
        plt.yticks([])
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.title('{}-{}, K={}'.format(covar_type.capitalize(), ['Untied', 'Tied'][covar_tied], k))
    plt.tight_layout()
    return fig
Project: OASIS | Author: j-friedrich
def plot_trace(n=0, lg=False):
    plt.plot(trueC[n], c=col[2], clip_on=False, zorder=5, label='Truth')
    plt.plot(solution, c=col[0], clip_on=False, zorder=7, label='Estimate')
    plt.plot(y, c=col[7], alpha=.7, lw=1, clip_on=False, zorder=-10, label='Data')
    if lg:
        plt.legend(frameon=False, ncol=3, loc=(.1, .62), columnspacing=.8)
    spks = np.append(0, solution[1:] - g * solution[:-1])
    plt.text(800, 2.2, 'Correlation: %.3f' % (np.corrcoef(trueSpikes[n], spks)[0, 1]), size=24)
    plt.gca().set_xticklabels([])
    simpleaxis(plt.gca())
    plt.ylim(0, 2.85)
    plt.xlim(0, 1500)
    plt.yticks([0, 2], [0, 2])
    plt.xticks([300, 600, 900, 1200], ['', ''])


# init params
Project: Twitter-and-IMDB-Sentimental-Analytics | Author: abhinandanramesh
def make_plot(counts):
    """
    Plot the counts for the positive and negative words for each timestep.
    Use plt.show() so that the plot will popup.
    """
    positive = []
    negative = []

    for count in counts:
        for word in count:
            if word[0] == "positive":
                positive.append(word[1])
            else:
                negative.append(word[1])

    plt.axis([-1, len(positive), 0, max(max(positive),max(negative))+100])
    pos, = plt.plot(positive, 'b-', marker = 'o', markersize = 10)
    neg, = plt.plot(negative, 'g-', marker = 'o', markersize = 10)
    plt.legend((pos,neg),('Positive','Negative'),loc=2)
    plt.xticks(np.arange(0, len(positive), 1))
    plt.xlabel("Time Step")
    plt.ylabel("Word Count")
    plt.show()
Project: reconstruction | Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print "zeige"
            cv2.imshow(obj.Label,opening)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=opening
Project: reconstruction | Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
        edges = cv2.Canny(img,obj.minVal,obj.maxVal)
        color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        edges=color

        if True:
            print "zeige"
            cv2.imshow(obj.Label,edges)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=edges
Project: reconstruction | Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = obj.iterations)


        if True:
            print "zeige"
            cv2.imshow(obj.Label,closing)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=closing
Project: reconstruction | Author: microelly2
def animpingpong(self):
        print self
        print self.Object
        print self.Object.Name
        obj=self.Object
        img = cv2.imread(obj.imageFile)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        dst = cv2.cornerHarris(gray,3,3,0.00001)
        dst = cv2.dilate(dst,None)
        img[dst>0.01*dst.max()]=[0,0,255]

        from matplotlib import pyplot as plt
        plt.subplot(121),plt.imshow(img,cmap = 'gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(dst,cmap = 'gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
Project: quoll | Author: LanguageMachines
def visualize_document_topic_probs(self, outfile):
        plots = []
        height_cumulative = numpy.zeros(self.rows)
        #fig = pyplot.figure(figsize=(21, 10), dpi=550)
        for column in range(self.columns):
            color = pyplot.cm.coolwarm(column/self.columns, 1)
            if column == 0:
                p = pyplot.bar(self.ind, self.document_topics_raw[:, column], self.barwidth, color=color)
            else:
                p = pyplot.bar(self.ind, self.document_topics_raw[:, column], self.barwidth, bottom=height_cumulative, color=color)
            height_cumulative += self.document_topics_raw[:, column]
            plots.append(p)
        pyplot.ylim((0, 1))
        pyplot.ylabel('Topics')
        pyplot.title('Topic distribution of CLS papers')
        pyplot.xticks(self.ind+self.barwidth/2, self.document_names, rotation='vertical', size = 10)
        pyplot.yticks(numpy.arange(0, 1, 10))
        pyplot.legend([p[0] for p in plots], self.topic_labels, bbox_to_anchor=(1, 1))
        self.fig.tight_layout()
        pyplot.savefig(outfile)
Project: trend_ml_toolkit_xgboost | Author: raymon-tian
def fea_plot(xg_model, feature, label, type = 'weight', max_num_features = None):
    fig, AX = plt.subplots(nrows=1, ncols=2)
    xgb.plot_importance(xg_model, xlabel=type, importance_type='weight', ax=AX[0], max_num_features=max_num_features)

    fscore = xg_model.get_score(importance_type=type)
    fscore = sorted(fscore.items(), key=itemgetter(1), reverse=True) # sort scores
    fea_index = get_fea_index(fscore, max_num_features)
    feature = feature[:, fea_index]
    dimension = len(fea_index)
    X = range(1, dimension+1)
    Yp = np.mean(feature[np.where(label==1)[0]], axis=0)
    Yn = np.mean(feature[np.where(label!=1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        Yp[i] /= param
        Yn[i] /= param
    p1 = AX[1].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[1].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[1].legend((p1,p2), ('Malware', 'Normal'))
    AX[1].set_title('Comparison of selected features by their means')
    AX[1].set_xlabel('Feature Index')
    AX[1].set_ylabel('Mean Value')
    AX[1].set_ylim(-1.1, 1.1)
    plt.xticks(X, fea_index+1, rotation=80)
    plt.suptitle('Feature Selection results')
Project: third_person_im | Author: bstadie
def plot_axes_scaling(self, iabscissa=1):
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principle Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
Project: Master-Thesis | Author: AntoinePassemiers
def plot_feature_importances(forest, patch_name_nfeatures, layer_name):
    importances = forest.feature_importances_
    n_features = len(importances)
    plt.figure()
    plt.title("Feature importances (layer %s)" % str(layer_name))
    bar_list = plt.bar(range(n_features), importances, color="r", align="center")
    if n_features < 50:
        plt.xticks(range(n_features), range(n_features))
    plt.xlim([-1, n_features])

    PATCH_COLORS = ["orangered", "orange", "green", "purple", "cyan", "blue", "red", "yellow"]

    bar_id = 0
    patches = list()
    for i, (patch_name, n_bars) in enumerate(patch_name_nfeatures):
        patches.append(mpatches.Patch(color=PATCH_COLORS[i], label=patch_name))
        for b in range(n_bars):
            bar_list[bar_id].set_color(PATCH_COLORS[i])
            bar_id += 1
        plt.legend(handles = patches)
Project: squeezenet-keras | Author: chasingbob
def update(self, conf_mat, classes, normalize=False):
        """This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        """
        plt.imshow(conf_mat, interpolation='nearest', cmap=self.cmap)
        plt.title(self.title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        if normalize:
            conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=1)[:, np.newaxis]

        thresh = conf_mat.max() / 2.
        for i, j in itertools.product(range(conf_mat.shape[0]), range(conf_mat.shape[1])):
            plt.text(j, i, conf_mat[i, j],                                          
                         horizontalalignment="center",
                         color="white" if conf_mat[i, j] > thresh else "black")

        plt.tight_layout()                                                    
        plt.ylabel('True label')                                              
        plt.xlabel('Predicted label')                                         
        plt.draw()
Project: Bag-of-Visual-Words-Python | Author: kushalvyas
def plotHist(self, vocabulary = None):
        print "Plotting histogram"
        if vocabulary is None:
            vocabulary = self.mega_histogram

        x_scalar = np.arange(self.n_clusters)
        y_scalar = np.array([abs(np.sum(vocabulary[:,h], dtype=np.int32)) for h in range(self.n_clusters)])

        print y_scalar

        plt.bar(x_scalar, y_scalar)
        plt.xlabel("Visual Word Index")
        plt.ylabel("Frequency")
        plt.title("Complete Vocabulary Generated")
        plt.xticks(x_scalar + 0.4, x_scalar)
        plt.show()
Project: johnson-county-ddj-public | Author: dssg
def plot_normalized_confusion_matrix_at_depth(self):
        """ Returns a normalized confusion matrix.

        :returns: normalized confusion matrix
        :rtype: matplotlib figure
        """
        cm = metrics.confusion_matrix(self.predictions['label'], self.y_pred)
        np.set_printoptions(precision = 2)
        fig = plt.figure()
        cm_normalized = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]

        plt.imshow(cm_normalized, interpolation = 'nearest',
                   cmap = plt.cm.Blues)
        plt.title("Normalized Confusion Matrix")
        plt.colorbar()
        tick_marks = np.arange(len(self.labels))
        plt.xticks(tick_marks, self.labels, rotation = 45)
        plt.yticks(tick_marks, self.labels)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        return(fig)
Project: Python-Machine-Learning-Cookbook | Author: PacktPublishing
def plot_feature_importances(feature_importances, title, feature_names):
    # Normalize the importance values 
    feature_importances = 100.0 * (feature_importances / max(feature_importances))

    # Sort the values and flip them
    index_sorted = np.flipud(np.argsort(feature_importances))

    # Arrange the X ticks
    pos = np.arange(index_sorted.shape[0]) + 0.5

    # Plot the bar graph
    plt.figure()
    plt.bar(pos, feature_importances[index_sorted], align='center')
    plt.xticks(pos, feature_names[index_sorted])
    plt.ylabel('Relative Importance')
    plt.title(title)
    plt.show()
Project: conv_seq2seq | Author: tobyyouup
def _create_figure(predictions_dict):
  """Creates and returns a new figure that visualizes
  attention scores for a single model's predictions.
  """

  # Find out how long the predicted sequence is
  target_words = list(predictions_dict["predicted_tokens"])

  prediction_len = _get_prediction_length(predictions_dict)

  # Get source words
  source_len = predictions_dict["features.source_len"]
  source_words = predictions_dict["features.source_tokens"][:source_len]

  # Plot
  fig = plt.figure(figsize=(8, 8))
  plt.imshow(
      X=predictions_dict["attention_scores"][:prediction_len, :source_len],
      interpolation="nearest",
      cmap=plt.cm.Blues)
  plt.xticks(np.arange(source_len), source_words, rotation=45)
  plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
  fig.tight_layout()

  return fig
Project: StrepHit | Author: Wikidata
def about_biographies_count(corpus):
    """ Finds how many items have/don't have a biography
    """
    count = with_bio = characters = 0
    for doc in load_scraped_items(corpus):
        count += 1
        if doc.get('bio') and len(doc['bio']) > 5:
            with_bio += 1
            characters += len(doc['bio'])

    print 'Total number of items:', count
    print 'Items with a biography %d (%.2f %%)' % (with_bio, 100. * with_bio / count)
    print 'Cumulative length of biographies: %d characters' % characters

    try:
        import matplotlib.pyplot as plt
    except ImportError:
        logger.warn('Cannot import matplotlib, skipping chart')
        return

    plt.bar([0, 1], [count - with_bio, with_bio], width=0.75)
    plt.xticks([0.375, 1.375], ['Without Biography', 'With Biography'])
    plt.grid(True, axis='y')
    plt.xlim((-0.5, 2.25))
    plt.show()
Project: RealtimeFacialEmotionRecognition | Author: sushant3095
def plot_confusion_matrix(cm, names=None, title='Confusion Matrix', cmap=plt.cm.Blues):
    plt.figure(4)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    # Add labels to confusion matrix:
    if names is None:
        names = range(cm.shape[0])

    tick_marks = np.arange(len(names))
    plt.xticks(tick_marks, names, rotation=45)
    plt.yticks(tick_marks, names)

    plt.tight_layout()
    plt.ylabel('Correct label')
    plt.xlabel('Predicted label')
    plt.show()

# Generate confusion matrix for Jaffe
# results = list of tuples of (correct label, predicted label)
#           e.g. [ ('HA', 3) ]
# categories = list of category names
# Returns confusion matrix; rows are correct labels and columns are predictions
Project: actions-for-actions | Author: gsig
def set_axes(ax,xticks,xlabels,xticks2,xlabels2,count,version):
    # fits all the x labels on the x axis by either rotating or stacking them
    ax.set_xticks(xticks)
    ax.set_xticklabels(xlabels)
    def printlabel(xticks2,xlabels2,ax,offset,rotation):
        ax2 = ax.twiny()
        ax2.xaxis.set_ticks_position("bottom")
        ax2.xaxis.set_label_position("bottom")
        ax2.spines["bottom"].set_position(("axes", offset))
        ax2.set_frame_on(False)
        ax2.patch.set_visible(False)
        for sp in ax2.spines.itervalues():
            sp.set_visible(False)
        ax2.spines["bottom"].set_visible(True)
        ax2.set_xticks(xticks2)
        ax2.set_xticklabels(xlabels2,verticalalignment='top',fontsize=12,rotation=rotation)
        ax2.xaxis.set_tick_params(width=0)
        ax2.set_xlim([0,count])
    if version=='dual': 
        printlabel(xticks2[::2],xlabels2[::2],ax,-0.15,0) # set 1st row label offset
        printlabel(xticks2[1::2],xlabels2[1::2],ax,-0.25,0) # set 2nd row label offset
    if version=='single':
        printlabel(xticks2,xlabels2,ax,-0.15,22) # set rotated label offset and rotation
    ax.set_xlim([0,count])
Project: rllabplusplus | Author: shaneshixiang
def plot_axes_scaling(self, iabscissa=1):
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principle Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
Project: Machine-Learning-Algorithms | Author: PacktPublishing
def show_classification_areas(X, Y, lr):
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
    Z = lr.predict(np.c_[xx.ravel(), yy.ravel()])

    Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(30, 25))
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Pastel1)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=np.abs(Y - 1), edgecolors='k', cmap=plt.cm.coolwarm)
    plt.xlabel('X')
    plt.ylabel('Y')

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())

    plt.show()
Project: lang-reps | Author: chaitanyamalaviya
def heatmap(src_sent, tgt_sent, att_weights, idx):

    plt.figure(figsize=(8, 6), dpi=80)
    att_probs = np.stack(att_weights, axis=1)

    plt.imshow(att_weights, cmap='gray', interpolation='nearest')
    #src_sent = [ str(s) for s in src_sent]
    #tgt_sent = [ str(s) for s in tgt_sent]
    #plt.xticks(range(0, len(tgt_sent)), tgt_sent, rotation='vertical')
    #plt.yticks(range(0, len(src_sent)), src_sent)
    plt.xticks(range(0, len(tgt_sent)),"")
    plt.yticks(range(0, len(src_sent)),"")
    plt.axis('off')
    plt.savefig("att_matrix_"+str(idx), bbox_inches='tight')
    plt.close()
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def show_heatmap(x, y, attention):
    #print attention[:len(y),:len(x)]
    #print attention[:len(y),:len(x)].shape
    #data = np.transpose(attention[:len(y),:len(x)])
    data = attention[:len(y),:len(x)]
    x, y = y, x

    #ax = plt.axes(aspect=0.4)
    ax = plt.axes()
    heatmap = plt.pcolor(data, cmap=plt.cm.Blues)

    xticks = np.arange(len(y)) + 0.5
    xlabels = y
    yticks = np.arange(len(x)) + 0.5
    ylabels = x
    plt.xticks(xticks, xlabels, rotation='vertical')
    ax.set_yticks(yticks)
    ax.set_yticklabels(ylabels)

    # make it look less like a scatter plot and more like a colored table
    ax.tick_params(axis='both', length=0)
    ax.invert_yaxis()
    ax.xaxis.tick_top()

    plt.colorbar(heatmap)

    plt.show()
    #plt.savefig('./attention-out.pdf')
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def correct_function():
    # order is para-prim, para-comp, cheat-prim, cheat-comp, scenario-prim, scenario-comp
    SEMPRE = [85.04, 66.98, 77.5, 49.01, 60, 33]
    DEEP_SEMPRE = [95.23, 75.64, 50, 47.05, 42.85, 16.66]

    X = np.arange(3)
    width = (0.8-0.1)/4

    s_p = [SEMPRE[0], SEMPRE[2], SEMPRE[4]]
    s_c = [SEMPRE[1], SEMPRE[3], SEMPRE[5]]
    d_p = [DEEP_SEMPRE[0], DEEP_SEMPRE[2], DEEP_SEMPRE[4]]
    d_c = [DEEP_SEMPRE[1], DEEP_SEMPRE[3], DEEP_SEMPRE[5]]

    plt.bar(X, s_p, width=width, color='#85c1e5')
    plt.bar(X+width, d_p, width=width, color='#254e7b')
    plt.bar(X+2*width+0.1, s_c, width=width, color='#85c1e5')
    plt.bar(X+3*width+0.1, d_c, width=width, color='#254e7b')

    width = (0.8-0.1)/4
    plt.xticks(np.array([width, 3*width+0.1,
                         1+width, 1+3*width+0.1,
                         2+width, 2+3*width+0.1]),
        ["Prim.", "Comp.", "Prim.", "Comp.", "Prim.", "Comp."])
    plt.text(0.4, -10, "Paraphrasing", ha='center', fontsize=18)
    plt.text(1.4, -10, "Scenarios", ha='center', fontsize=18)
    plt.text(2.4, -10, "Composition", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 2.9)
    #plt.tight_layout()
    plt.legend(["SEMPRE", "Neural Net"], loc ="upper right")
    plt.savefig('./figures/correct-function.pdf')
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def accuracy_against_sempre():
    # order is para-prim, para-comp, cheat-prim, cheat-comp, scenario-prim, scenario-comp
    SEMPRE = [71.4, 50.2, 67.5, 33.3, 34.28, 30.5]
    DEEP_SEMPRE = [89.11, 55.27, 47.5, 29.4, 34.28, 16.66]

    X = np.arange(3)
    width = (0.8-0.1)/4

    s_p = [SEMPRE[0], SEMPRE[2], SEMPRE[4]]
    s_c = [SEMPRE[1], SEMPRE[3], SEMPRE[5]]
    d_p = [DEEP_SEMPRE[0], DEEP_SEMPRE[2], DEEP_SEMPRE[4]]
    d_c = [DEEP_SEMPRE[1], DEEP_SEMPRE[3], DEEP_SEMPRE[5]]

    plt.bar(X, s_p, width=width, color='#85c1e5')
    plt.bar(X+width, d_p, width=width, color='#254e7b')
    plt.bar(X+2*width+0.1, s_c, width=width, color='#85c1e5')
    plt.bar(X+3*width+0.1, d_c, width=width, color='#254e7b')

    width = (0.8-0.1)/4
    plt.xticks(np.array([width, 3*width+0.1,
                         1+width, 1+3*width+0.1,
                         2+width, 2+3*width+0.1]),
        ["Prim.", "Comp.", "Prim.", "Comp.", "Prim.", "Comp."])
    plt.text(0.4, -10, "Paraphrasing", ha='center', fontsize=18)
    plt.text(1.4, -10, "Scenarios", ha='center', fontsize=18)
    plt.text(2.4, -10, "Composition", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 2.9)
    #plt.tight_layout()
    plt.legend(["SEMPRE", "Neural Net"], loc ="upper right")
    plt.savefig('./figures/accuracy-combined.pdf')
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def extensibility():
    # order is new device acc, new device recall, new domain acc, new domain recall
    SEMPRE = [100 * 117./214., 100 * (10.+63.)/(15.+104.), 100 * (42.+232.)/(535.+75.), 100 * (32.+136.)/(286.+48.)]
    DEEP_SEMPRE = [38, 47, 55, 74]

    X = np.arange(2)
    width = (0.8-0.1)/4

    s_a = [SEMPRE[0], SEMPRE[2]]
    s_r = [SEMPRE[1], SEMPRE[3]]
    d_a = [DEEP_SEMPRE[0], DEEP_SEMPRE[2]]
    d_r = [DEEP_SEMPRE[1], DEEP_SEMPRE[3]]

    plt.bar(X, s_a, width=width, color='#85c1e5')
    plt.bar(X+width, d_a, width=width, color='#254e7b')
    plt.bar(X+2*width+0.1, s_r, width=width, color='#85c1e5')
    plt.bar(X+3*width+0.1, d_r, width=width, color='#254e7b')

    width = (0.8-0.1)/4
    plt.xticks(np.array([width, 3*width+0.1,
                         1+width, 1+3*width+0.1,
                         2+width, 2+3*width+0.1]),
        ["Accuracy", "Recall", "Accuracy", "Recall"])
    plt.text(0.4, -10, "New Device", ha='center', fontsize=18)
    plt.text(1.4, -10, "New Domain", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 1.9)
    #plt.tight_layout()
    plt.legend(["SEMPRE", "Neural Net"], loc ="upper right")
    plt.savefig('./figures/extensibility.pdf')
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def recall():
    # order is para-prim, para-comp, cheat-prim, cheat-comp, scenario-prim, scenario-comp
    SEMPRE = [81.06, 55.33, 65.38, 34.69, 40.0, 38.46]
    DEEP_SEMPRE = [93.75, 65.93, 60.0, 30.61, 58.33, 22.72]

    X = np.arange(3)
    width = (0.8-0.1)/4

    s_p = [SEMPRE[0], SEMPRE[2], SEMPRE[4]]
    s_c = [SEMPRE[1], SEMPRE[3], SEMPRE[5]]
    d_p = [DEEP_SEMPRE[0], DEEP_SEMPRE[2], DEEP_SEMPRE[4]]
    d_c = [DEEP_SEMPRE[1], DEEP_SEMPRE[3], DEEP_SEMPRE[5]]

    plt.bar(X, s_p, width=width, color='#85c1e5')
    plt.bar(X+width, d_p, width=width, color='#254e7b')
    plt.bar(X+2*width+0.1, s_c, width=width, color='#85c1e5')
    plt.bar(X+3*width+0.1, d_c, width=width, color='#254e7b')

    width = (0.8-0.1)/4
    plt.xticks(np.array([width, 3*width+0.1,
                         1+width, 1+3*width+0.1,
                         2+width, 2+3*width+0.1]),
        ["Prim.", "Comp.", "Prim.", "Comp.", "Prim.", "Comp."])
    plt.text(0.4, -10, "Paraphrasing", ha='center', fontsize=18)
    plt.text(1.4, -10, "Scenarios", ha='center', fontsize=18)
    plt.text(2.4, -10, "Composition", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 2.9)
    #plt.tight_layout()
    plt.legend(["SEMPRE", "Neural Net"], loc ="upper right")
    plt.savefig('./figures/recall.pdf')
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def model_choices():
    # no attention: model 43
    # full: model 19
    # no grammar/full : model 19

    # no attention/no grammar, +grammar, +attention, full model
    train = [89.09, 89.16, 90.47, 90.49]
    dev = [45.7, 45.8, 55.5, 56.1] 
    test = [40.2, 40.4, 56, 56.6]
    train_recall = [82.30, 82.35, 90.04, 90.05]
    dev_recall = [62.62, 62.63, 76.76, 77.78]
    test_recall = [59.43, 60.37, 69.8, 70.75]

    #plt.newfigure()

    X = 1 + np.arange(4)
    plt.plot(X, train_recall, '--')#, color='#85c1e5')
    plt.plot(X, train, '--x')#, color='#6182a6')
    plt.plot(X, dev_recall, '-+')#
    plt.plot(X, dev, '-o')#
    plt.plot(X, test_recall, '-^')#, color='#6182a6')
    plt.plot(X, test, '-')#, color='#052548')

    plt.ylim(0, 100)
    plt.xlim(0.5, 4.5)

    plt.xticks(X, ["Seq2Seq", "+ Grammar", "+ Attention", "Full Model"])
    plt.tight_layout()

    plt.legend(["Train recall", "Train accuracy", "Dev recall", "Dev accuracy", "Test recall", "Test accuracy"], loc='lower right')
    plt.savefig('./figures/model-choices.pdf')
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def dataset_train():
    # 0 param, 1 param, 2 param, 3+ param
    base = [1388, 1285, 977, 307]
    paraphrasing = [1185, 2277, 1471, 900]
    ifttt = [1525, 645, 414, 2607]
    generated = [569, 2098, 2723, 4610]

    data = np.array([base, paraphrasing, ifttt, generated])
    p_0 = data[:,0]
    p_1 = data[:,1]
    p_2 = data[:,2]
    p_3 = data[:,3]

    width = 0.7

    X = np.arange(4)
    plt.bar(X, p_3, width=width, color='#ffffff', bottom=p_0+p_1+p_2)
    plt.bar(X, p_2, width=width, color='#cde6f4', bottom=p_0+p_1)
    plt.bar(X, p_1, width=width, color='#85c1e5', bottom=p_0)
    plt.bar(X, p_0, width=width, color='#254e7b')

    plt.xticks(X + width/2, ["Base +\n Author", "Paraphrasing", "IFTTT", "Generated"])
    plt.xlim(-0.3, 4)
    plt.ylim(0, 11000)

    plt.tight_layout()
    plt.legend(["3+ Params", "2 Params", "1 Param", "0 Params"], loc='upper left')
    plt.savefig('./figures/dataset-train.pdf')
Project: Google-QuickDraw | Author: ankonzoid
def plot_img(img, title_str, fignum):
    plt.plot(fignum), plt.imshow(img, cmap='gray')
    plt.title(title_str), plt.xticks([]), plt.yticks([])
    fignum += 1  # move onto next figure number
    plt.show()
    return fignum

# read image
Project: pycma | Author: CMA-ES
def plot_correlations(self, iabscissa=1):
        """spectrum of correlation matrix and largest correlation"""
        if not hasattr(self, 'corrspec'):
            self.load()
        if len(self.corrspec) < 2:
            return self
        x = self.corrspec[:, iabscissa]
        y = self.corrspec[:, 6:]  # principle axes
        ys = self.corrspec[:, :6]  # "special" values

        from matplotlib.pyplot import semilogy, text, grid, axis, title
        self._enter_plotting()
        semilogy(x, y, '-c')
        # hold(True)
        semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r')
        text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio')
        if ys is not None:
            semilogy(x, 1 + ys[:, 2], '-b')
            text(x[-1], 1 + ys[-1, 2], '1 + min(corr)')
            semilogy(x, 1 - ys[:, 5], '-b')
            text(x[-1], 1 - ys[-1, 5], '1 - max(corr)')
            semilogy(x[:], 1 + ys[:, 3], '-k')
            text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)')
            semilogy(x[:], 1 - ys[:, 4], '-k')
            text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)')
        grid(True)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        title('Spectrum (roots) of correlation matrix')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
Project: saapy | Author: ashapochka
def plot_author_contributions(commit_frame):
    sns.boxplot(x='author', y='stats_total_lines',
                data=commit_frame,
                orient='v')
    plt.title('Code Contributions by Authors')
    plt.xlabel('Author')
    plt.ylabel('Total Lines Committed')
    plt.xticks(rotation=70)
    plt.show()