Python matplotlib.pyplot module: plot() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.plot().
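Before the project snippets, here is a minimal, self-contained sketch of the basic plot() call pattern that most of the examples below build on (the data and labels are invented for illustration):

import numpy as np
import matplotlib.pyplot as plt

# toy data, for illustration only
x = np.linspace(0, 2 * np.pi, 100)

plt.plot(x, np.sin(x), label='sin(x)')        # default solid line
plt.plot(x, np.cos(x), '--', label='cos(x)')  # format string: dashed line
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc='best')
plt.show()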

Project: lang-reps    Author: chaitanyamalaviya
def plot_sent_trajectories(sents, decode_plot):

    font = {'family' : 'normal',
            'size'   : 14}

    matplotlib.rc('font', **font) 
    i = 0    
    l = ["Portuguese","Catalan"]

    axes = plt.gca()
    #axes.set_xlim([xmin,xmax])
    axes.set_ylim([-1,1])

    for sent, enc in zip(sents, decode_plot):
        if i==2: continue
        i += 1
        #times = np.arange(len(enc))
        times = np.linspace(0,1,len(enc))
        plt.plot(times, enc, label=l[i-1])
    plt.title("Hidden Node Trajectories")
    plt.xlabel('timestep')
    plt.ylabel('trajectories')
    plt.legend(loc='best')
    plt.savefig("final_tests/cr_por_cat_hidden_cell_trajectories", bbox_inches="tight")
    plt.close()
Project: qqmbr    Author: ischurov
def normvectorfield(xs,ys,fs,**kw):
    """
    plot normalized vector field

    kwargs
    ======

    - length is a desired length of the lines (default: 1)
    - the rest of the kwargs are passed to plot
    """
    length = kw.pop('length') if 'length' in kw else 1
    x, y = np.meshgrid(xs, ys)
    # calculate vector field
    vx,vy = fs(x,y)
    # plot vector field
    norm = length / np.sqrt(vx**2 + vy**2)
    plt.quiver(x, y, vx * norm, vy * norm, angles='xy',**kw)
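A hedged usage sketch for normvectorfield above; the rotational field (-y, x) and the grid are invented for illustration, and numpy/pyplot are assumed to be imported as np/plt as in the snippet:

xs = np.linspace(-2, 2, 20)
ys = np.linspace(-2, 2, 20)
# illustrative field: rotation about the origin, (vx, vy) = (-y, x)
normvectorfield(xs, ys, lambda x, y: (-y, x), length=0.15, color='gray')
plt.show()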
Project: DeepAnomaly    Author: adiyoss
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)

    X = create_sequences(db[:-maxlen], win_size=maxlen, step=maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], input_size))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(hidden_size, return_sequences=False, input_shape=(maxlen, input_size)))
    model.add(Dense(maxlen))

    model.load_weights(save_dir + model_name)
    model.compile(loss='mse', optimizer='adam')

    prediction = model.predict(X, batch_size, verbose=1)
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()
    Y = db[maxlen:]
    plt.plot(prediction, label='prediction')
    plt.plot(Y, label='true')
    plt.legend()
    plt.show()
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def different_training_sets():
    # base+author -> +paraphrasing -> +ifttt -> +generated
    train = [84.7, 93.2, 90.4, 91.99]
    test = [3.6, 37.4, 50.94, 55.4]
    train_recall = [66.6, 88.43, 92.63, 91.21]
    test_recall = [0.066, 49.05, 50.94, 75.47]

    #plt.newfigure()

    X = 1 + np.arange(4)
    plt.plot(X, train_recall, '--', color='#85c1e5')
    plt.plot(X, train, '-x', color='#6182a6')
    plt.plot(X, test_recall, '-o', color='#6182a6')
    plt.plot(X, test, '-', color='#052548')

    plt.ylim(0, 100)
    plt.xlim(0.5, 4.5)

    plt.xticks(X, ["Base + Author", "+ Paraphrasing", "+ IFTTT", "+ Generated"])
    plt.tight_layout()

    plt.legend(["Train recall", "Train accuracy", "Test recall", "Test accuracy"], loc='lower right')
    plt.savefig('./figures/training-sets.pdf')
Project: facebook-message-analysis    Author: szheng17
def plot_line_graph_multiple_lines(x, label_to_values, title, x_label, y_label):
    if not all(len(x) == len(values) for values in label_to_values.values()):
        raise ValueError('values of label_to_values must have length len(x)')
    colors = ['b','g','r','c','m','y','k']
    line_styles = ['-','--',':']
    for (i, label) in enumerate(sorted(label_to_values.keys())):
        color = colors[i%len(colors)]
        line_style = line_styles[(i//len(colors))%len(line_styles)]
        plt.plot(x,
                 label_to_values[label],
                 label=label,
                 color=color,
                 linestyle=line_style)
    plt.legend(loc='center left', bbox_to_anchor=(1,0.5), prop={'size':9})
    plt.tight_layout(pad=9)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()

# x_min, x_max for example proportion_initiated_by_user
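A possible call of plot_line_graph_multiple_lines, with made-up message counts purely for illustration:

x = list(range(12))
label_to_values = {
    'Alice': [3 * i for i in x],
    'Bob': [2 * i + 5 for i in x],
}
plot_line_graph_multiple_lines(x, label_to_values,
                               title='Messages per month',
                               x_label='Month', y_label='Message count')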
Project: facebook-message-analysis    Author: szheng17
def plot_histogram(x, n_bins, title, x_label, y_label):
    """
    Plots a histogram from a list of data.

    Args:
        x: A list of floats representing the data.
        n_bins: An int representing the number of bins to plot.
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.hist(x, bins=n_bins)
    plt.show()

    # probability
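A hedged example call of plot_histogram with synthetic data (the sample itself is invented):

import random

samples = [random.gauss(0.0, 1.0) for _ in range(1000)]   # synthetic data
plot_histogram(samples, n_bins=30, title='Synthetic sample',
               x_label='Value', y_label='Count')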
Project: cellranger    Author: 10XGenomics
def plot_norm_pct_hist( plt, values, binsize, start, **plt_args ):
    x = start
    xvals = []
    yvals = []
    norm = 0.0
    for v in values:
        xvals.append(x)
        yvals.append(v)
        xvals.append(x+binsize)
        norm += v
        yvals.append(v)
        x += binsize
    for i in range(len(yvals)):
        yvals[i] = yvals[i]/norm*100.0
    plt.plot( xvals, yvals, **plt_args)
    plt.xlim( start, x )
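A sketch of how plot_norm_pct_hist might be called; note that it takes the pyplot module itself as its first argument, and the bin counts below are invented:

import matplotlib.pyplot as plt

counts = [5, 20, 40, 25, 10]   # raw per-bin values, made up
plot_norm_pct_hist(plt, counts, binsize=0.1, start=0.0, color='steelblue')
plt.xlabel('Value')
plt.ylabel('Percent of total')
plt.show()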
Project: qqmbr    Author: ischurov
def eulersplot(f, xa, xb, ya, n = 500, toolarge = 1E10, **kw):
    """plots numerical solution y'=f

    args
    ====

    - f(x,y): a function in rhs
    - xa: initial value of independent variable
    - xb: final value of independent variable
    - ya: initial value of dependent variable
    - n : number of steps (the higher, the better)
    - toolarge: stop once |y| exceeds this value (default: 1E10)
    """
    h = (xb - xa) / float(n)
    x = [xa] 
    y = [ya]
    for i in range(1,n+1):
        newy = y[-1] + h * f(x[-1], y[-1])
        if abs(newy) > toolarge:
            break
        y.append(newy)
        x.append(x[-1] + h)
    plt.plot(x,y, **kw)
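For instance, the Euler approximation of y' = y with y(0) = 1 on [0, 2] could be drawn like this (assuming eulersplot and pyplot are available as in the snippet; the equation is chosen only for illustration):

eulersplot(lambda x, y: y, 0.0, 2.0, 1.0, n=500, label='Euler, dy/dx = y')
plt.legend()
plt.show()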
Project: qqmbr    Author: ischurov
def vectorfield(xs,ys,fs,**kw):
    """
    plot vector field (no normalization!)

    args
    ====
    fs is a function that returns tuple (vx,vy)

    kwargs
    ======

    - length is a desired length of the lines (default: 1)
    - the rest of the kwargs are passed to plot
    """
    length = kw.pop('length') if 'length' in kw else 1
    x, y = np.meshgrid(xs, ys)
    # calculate vector field
    vx, vy = fs(x, y)
    # plot vector field
    norm = length
    plt.quiver(x, y, vx * norm, vy * norm, angles='xy',**kw)
Project: voxcelchain    Author: hiroaki-kaneda
def conv1(model):
    n1, n2, x, y, z = model.conv1.W.shape
    fig = plt.figure()
    for nn in range(0, n1):
        ax = fig.add_subplot(4, 5, nn+1, projection='3d')
        ax.set_xlim(0.0, x)
        ax.set_ylim(0.0, y)
        ax.set_zlim(0.0, z)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        for xx in range(0, x):
            for yy in range(0, y):
                for zz in range(0, z):
                    max = np.max(model.conv1.W.data[nn, :])
                    min = np.min(model.conv1.W.data[nn, :])
                    step = (max - min) / 1.0
                    C = (model.conv1.W.data[nn, 0, xx, yy, zz] - min) / step
                    color = cm.cool(C)
                    C = abs(1.0 - C)
                    ax.plot(np.array([xx]), np.array([yy]), np.array([zz]), "o", color=color, ms=7.0*C, mew=0.1)

    plt.savefig("result/graph_conv1.png")
Project: voxcelchain    Author: hiroaki-kaneda
def create_graph():
    logfile = 'result/log'
    xs = []
    ys = []
    ls = []
    f = open(logfile, 'r')
    data = json.load(f)

    print(data)

    for d in data:
        xs.append(d["iteration"])
        ys.append(d["main/accuracy"])
        ls.append(d["main/loss"])

    plt.clf()
    plt.cla()
    plt.hlines(1, 0, np.max(xs), colors='r', linestyles="dashed")  # dashed reference line at y=1
    plt.title(r"loss/accuracy")
    plt.plot(xs, ys, label="accuracy")
    plt.plot(xs, ls, label="loss")
    plt.legend()
    plt.savefig("result/log.png")
Project: bnn-analysis    Author: myshkov
def plot_hist(baseline_samples, target_samples, true_x, true_y):
    baseline_samples = baseline_samples.squeeze()
    target_samples = target_samples.squeeze()

    bmin, bmax = baseline_samples.min(), baseline_samples.max()

    ax = sns.kdeplot(baseline_samples, shade=True, color=(0.6, 0.1, 0.1, 0.2))
    ax = sns.kdeplot(target_samples, shade=True, color=(0.1, 0.1, 0.6, 0.2))
    ax.set_xlim(bmin, bmax)

    y0, y1 = ax.get_ylim()

    plt.plot([true_y, true_y], [0, y1 - (y1 - y0) * 0.01], linewidth=1, color='r')
    plt.title('Predictive' + (f' at {true_x:.2f}' if true_x is not None else ''))

    fig = plt.gcf()
    fig.set_size_inches(9, 9)
    # plt.tight_layout()  # pad=0.4, w_pad=0.5, h_pad=1.0)

    name = utils.DATA_DIR.replace('/', '-')
    # plt.tight_layout(pad=0.6)
    utils.save_fig('predictive-at-point-' + name)
Project: Google-QuickDraw    Author: ankonzoid
def plot_unlabeled_images_random(image_list, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2*n, 2))
    plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.get_xaxis().set_visible(False); ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_compare: given test images and their reconstruction, we plot them for visual comparison
Project: sampleRNN_ICLR2017    Author: soroushmehr
def plot_traing_info(x, ylist, path):
    """
    Loads the log file and plots the x values against each y in ylist.
    Saves as <path>/train_log.png
    """
    file_name = os.path.join(path, __train_log_file_name)
    try:
        with open(file_name, "rb") as f:
            log = pickle.load(f)
    except IOError:  # first time
        warnings.warn("There is no {} file here!!!".format(file_name))
        return
    plt.figure()
    x_vals = log[x]
    for y in ylist:
        y_vals = log[y]
        if len(y_vals) != len(x_vals):
            warnings.warn("One of y's: {} does not have the same length as x: {}".format(y, x))
        plt.plot(x_vals, y_vals, label=y)
        # assert len(y_vals) == len(x_vals), "not the same len"
    plt.xlabel(x)
    plt.legend()
    #plt.show()
    plt.savefig(file_name[:-3]+'png', bbox_inches='tight')
    plt.close('all')
Project: pybot    Author: spillai
def recall_from_IoU(IoU, samples=500): 
    """
    Compute recall as a function of the IoU threshold (recall vs. IoU threshold curve).
    """

    if not (isinstance(IoU, list) or IoU.ndim == 1):
        raise ValueError('IoU needs to be a list or 1-D')
    iou = np.float32(IoU)

    # Plot intersection over union
    IoU_thresholds = np.linspace(0.0, 1.0, samples)
    recall = np.zeros_like(IoU_thresholds)
    for idx, IoU_th in enumerate(IoU_thresholds):
        tp, relevant = 0, 0
        inds, = np.where(iou >= IoU_th)
        recall[idx] = len(inds) * 1.0 / len(IoU)

    return recall, IoU_thresholds 

# =====================================================================
# Generic utility functions for object recognition
# ---------------------------------------------------------------------
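recall_from_IoU above only returns the curve, so a caller would typically plot it; a hedged sketch with invented IoU scores:

import matplotlib.pyplot as plt

ious = [0.55, 0.72, 0.80, 0.35, 0.91, 0.64]   # illustrative IoU values
recall, thresholds = recall_from_IoU(ious)
plt.plot(thresholds, recall)
plt.xlabel('IoU threshold')
plt.ylabel('Recall')
plt.show()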
Project: pycma    Author: CMA-ES
def plot_axes_scaling(self, iabscissa=1):
        from matplotlib import pyplot
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
            pyplot.text(0, dat.D[-1, 5],
                        'all axes scaling values equal to %s'
                        % str(dat.D[-1, 5]),
                        verticalalignment='center')
            return self  # nothing interesting to plot
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        # pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principle Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
Project: saapy    Author: ashapochka
def plot_ecdf(x, y, xlabel='attribute', legend='x'):
    """
    Plot distribution ECDF
    x should be sorted, y typically from 1/len(x) to 1

    TODO: function should be improved to plot multiple overlayed ecdfs
    """
    plt.plot(x, y, marker='.', linestyle='none')

    # Make nice margins
    plt.margins(0.02)

    # Annotate the plot
    plt.legend((legend,), loc='lower right')
    _ = plt.xlabel(xlabel)
    _ = plt.ylabel('ECDF')

    # Display the plot
    plt.show()
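A sketch of preparing the inputs that the docstring asks for (sorted x, y running from 1/n to 1) from a raw sample; the data here is synthetic:

import numpy as np

data = np.random.normal(size=200)                  # synthetic sample
x = np.sort(data)
y = np.arange(1, len(x) + 1) / float(len(x))       # 1/n, 2/n, ..., 1
plot_ecdf(x, y, xlabel='value', legend='sample')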
Project: NeoAnalysis    Author: neoanalysis
def __spectrogram(self,data,samp_rate,window,per_lap,wlen,mult):        
        samp_rate = float(samp_rate)
        if not wlen:
            wlen = samp_rate/100.0

        npts=len(data)
        nfft = int(self.__nearest_pow_2(wlen * samp_rate))
        if nfft > npts:
            nfft = int(self.__nearest_pow_2(npts / 8.0))
        if mult is not None:
            mult = int(self.__nearest_pow_2(mult))
            mult = mult * nfft
        nlap = int(nfft * float(per_lap))
        end = npts / samp_rate

        window = signal.get_window(window,nfft)
        specgram, freq, time = mlab.specgram(data, Fs=samp_rate,window=window,NFFT=nfft,
                                            pad_to=mult, noverlap=nlap)
        return specgram,freq,time

    # Sort data by experimental conditions and plot spectrogram for analog signals (e.g. LFP)
Project: nelder_mead    Author: owruby
def plot2d_simplex(simplex, ind):
    fig_dir = "./"
    plt.cla()
    n = 1000
    x1 = np.linspace(-256, 1024, n)
    x2 = np.linspace(-256, 1024, n)
    X, Y = np.meshgrid(x1, x2)
    Z = np.sqrt(X ** 2 + Y ** 2)
    plt.contour(X, Y, Z, levels=list(np.arange(0, 1200, 10)))
    plt.gca().set_aspect("equal")
    plt.xlim((-256, 768))
    plt.ylim((-256, 768))

    plt.plot([simplex[0].x[0], simplex[1].x[0]],
             [simplex[0].x[1], simplex[1].x[1]], color="#000000")
    plt.plot([simplex[1].x[0], simplex[2].x[0]],
             [simplex[1].x[1], simplex[2].x[1]], color="#000000")
    plt.plot([simplex[2].x[0], simplex[0].x[0]],
             [simplex[2].x[1], simplex[0].x[1]], color="#000000")
    plt.savefig(os.path.join(fig_dir, "{:03d}.png".format(ind)))
Project: AutoSleepScorerDev    Author: skjerns
def on_train_end(self, logs={}):
        self.model.set_weights(self.best_weights)
        try: self.model.save('copy.model')
        except Exception: print('could not save model')
        if self.verbose > 0: print(' {:.1f} min'.format((time.time()-self.start)/60), flush=True)
        if self.plot:
            filename ='{}_{}_{}.png'.format(self.counter, self.name, self.model.name)
            filename = ''.join([x if x not in ',;\\/:><|?*\"' else '_' for x in filename])
            try: plt.savefig(os.path.join('.','plots', filename ))
            except Exception as e:print('can\'t save plots: {}'.format(e))
#        try:
#            self.model.save(os.path.join('.','weights', str(self.counter) + self.model.name))
#        except Exception as error:
#            print("Got an error while saving model: {}".format(error))
#        return




#%%
Project: s2g    Author: caesar0301
def test_point_projects_to_edge(self):
        # p = (114.83299055, 26.8892277)
        p = (121.428387, 31.027371)
        a = time.time()
        edges, segments = self.sg.point_projects_to_edges(p, 0.01)
        print(time.time() - a)

        if self.show_plots:
            plt.figure()
            s2g.plot_lines(MultiLineString(segments), color='orange')  # original roads
            for i in range(0, len(edges)):
                s, e = edges[i]
                sxy = self.sg.node_xy[s]
                exy = self.sg.node_xy[e]
                plt.plot([sxy[0], exy[0]], [sxy[1], exy[1]], color='green')  # graph edges
            plt.plot(p[0], p[1], color='red', markersize=12, marker='o')  # bridges
            plt.show()
Project: code-uai16    Author: thanhan
def plot_gold(g1, g2, lc, p = 0):
    """
    plot sensitivity/specificity of g1 against g2
    only consider workers in lc
    """

    mv = crowd_model.mv_model(lc)
    s1 = []; s2 = []

    for w in g1.keys():
        if w in g2 and g1[w][p] != None and g2[w][p] != None and w in mv.dic_ss:
            s1.append(g1[w][p])
            s2.append(g2[w][p])

    plt.xticks((0, 0.5, 1), ("0", "0.5", "1"))
    plt.tick_params(labelsize = 25)
    plt.yticks((0, 0.5, 1), ("0", "0.5", "1"))

    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.scatter(s1, s2, marker = '.', s=50, c = 'black')

    plt.xlabel('task 1 sen.', fontsize = 25)
    plt.ylabel('task 2 sen.', fontsize = 25)
Project: code-uai16    Author: thanhan
def plot_multi_err():
    """
    """
    f = open('gzoo1000000_1_2_0.2_pickle.pkl')
    res = pickle.load(f)
    sing = res[(0.5, 'single')]
    multi = res[(0.5, 'multi')]
    (g1, g2, g3, g4) = load_gold()

    a = []; b = []
    for w in multi:
        a.append(abs(g2[w][0]- sing[w][0])); b.append(abs(g2[w][0] - multi[w][0]))


    plt.xlim(0,1); plt.ylim(0,1)
    plt.scatter(a, b, marker = '.')
    plt.plot([0, 1], [0, 1], ls="-", c=".5")

    plt.xlabel('single')
    plt.ylabel('multi')
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def plotValResults(self, save_path=None, label=None):
        if label is not None:
            accs = self.training_val_results['acc'][label]
            aucs = self.training_val_results['auc'][label]
        else:
            accs = self.training_val_results['acc']
            aucs = self.training_val_results['auc']
        plt.figure()
        plt.plot([i * ACCURACY_LOGGED_EVERY_N_STEPS for i in range(len(accs))], accs)
        plt.plot([i * ACCURACY_LOGGED_EVERY_N_STEPS for i in range(len(aucs))], aucs)
        plt.xlabel('Training step')
        plt.ylabel('Validation accuracy')
        plt.legend(['Accuracy','AUC'])
        if save_path is None:
            plt.show()
        else:
            plt.savefig(save_path)
        plt.close()
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def plotValResults(self, save_path=None, label=None):
        if label:
            accs = self.training_val_results_per_task['acc'][label]
            aucs = self.training_val_results_per_task['auc'][label]
        else:
            accs = self.training_val_results['acc']
            aucs = self.training_val_results['auc']
        plt.figure()
        plt.plot([i * self.accuracy_logged_every_n for i in range(len(accs))], accs)
        plt.plot([i * self.accuracy_logged_every_n for i in range(len(aucs))], aucs)
        plt.xlabel('Training step')
        plt.ylabel('Validation accuracy')
        plt.legend(['Accuracy','AUC'])
        if save_path is None:
            plt.show()
        else:
            plt.savefig(save_path)
Project: GANGogh    Author: rkjones4
def flush():
    prints = []

    for name, vals in _since_last_flush.items():
        prints.append("{}\t{}".format(name, np.mean(list(vals.values()))))
        _since_beginning[name].update(vals)

        x_vals = np.sort(list(_since_beginning[name].keys()))
        y_vals = [_since_beginning[name][x] for x in x_vals]

        plt.clf()
        plt.plot(x_vals, y_vals)
        plt.xlabel('iteration')
        plt.ylabel(name)
        plt.savefig('generated/'+name.replace(' ', '_')+'.jpg')

    print("iter {}\t{}".format(_iter[0], "\t".join(prints)))
    _since_last_flush.clear()

    with open('log.pkl', 'wb') as f:
        pickle.dump(dict(_since_beginning), f, 4)
Project: bob.bio.base    Author: bioidiap
def _plot_cmc(cmcs, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'lower right'
  # open new page for current plot
  figure = pyplot.figure()

  max_R = 0
  # plot the CMC curves
  for i in range(len(cmcs)):
    probs = bob.measure.cmc(cmcs[i])
    R = len(probs)
    pyplot.semilogx(range(1, R+1), probs, figure=figure, color=colors[i], label=labels[i])
    max_R = max(R, max_R)

  # change axes accordingly
  ticks = [int(t) for t in pyplot.xticks()[0]]
  pyplot.xlabel('Rank')
  pyplot.ylabel('Probability')
  pyplot.xticks(ticks, [str(t) for t in ticks])
  pyplot.axis([0, max_R, -0.01, 1.01])
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: bob.bio.base    Author: bioidiap
def _plot_epc(scores_dev, scores_eval, colors, labels, title, fontsize=10, position=None):
  if position is None: position = 'upper center'
  # open new page for current plot
  figure = pyplot.figure()

  # plot the EPC curves
  for i in range(len(scores_dev)):
    x,y = bob.measure.epc(scores_dev[i][0], scores_dev[i][1], scores_eval[i][0], scores_eval[i][1], 100)
    pyplot.plot(x, y, color=colors[i], label=labels[i])

  # change axes accordingly
  pyplot.xlabel('alpha')
  pyplot.ylabel('HTER')
  pyplot.title(title)
  pyplot.axis([-0.01, 1.01, -0.01, 0.51])
  pyplot.grid(True)
  pyplot.legend(loc=position, prop = {'size':fontsize})
  pyplot.title(title)

  return figure
Project: CopyNet    Author: MultiPath
def main():
    losses   = []
    accuracy = []
    for echo in xrange(4000):
        logger.info('Iteration = {}'.format(echo))
        train_data = simulator(M=20)

        print train_data['text'][-1]

        loss       = learner(train_data, fr=0.)
        losses.append(loss)
        accuracy  += train_data['acc']

        if echo % 100 == 99:
            plt.plot(accuracy)
            plt.show()

    # pkl.dump(losses, open('losses.temp.pkl'))
Project: CopyNet    Author: MultiPath
def main():
    losses   = []
    accuracy = []
    for echo in xrange(4000):
        logger.info('Iteration = {}'.format(echo))
        train_data = simulator(M=20)

        print train_data['text'][-1]

        loss       = learner(train_data, fr=0.)
        losses.append(loss)
        accuracy  += train_data['acc']

        if echo % 100 == 99:
            plt.plot(accuracy)
            plt.show()

    # pkl.dump(losses, open('losses.temp.pkl'))
Project: BISIP    Author: clberube
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x),100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
#    list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-", color='blue',linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"]-sol[0]["params"]["m__std"])  , 100*(sol[0]["params"]["m_"]+sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")

    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
#    ax.set_title(title+" step method", fontsize=14)
Project: KATE    Author: hugochan
def plot_info_retrieval(precisions, save_file):
    # markers = ["|", "D", "8", "v", "^", ">", "h", "H", "s", "*", "p", "d", "<"]
    markers = ["D", "p", 's', "*", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    ticks = zip(*zip(*precisions)[1][0])[0]
    plt.xticks(range(len(ticks)), ticks)
    new_x = interpolate.interp1d(ticks, range(len(ticks)))(ticks)

    i = 0
    for model_name, val in precisions:
        fr, pr = zip(*val)
        plt.plot(new_x, pr, linestyle='-', alpha=0.7, marker=markers[i],
                        markersize=8, label=model_name)
        i += 1
        # plt.legend(model_name)
    plt.xlabel('Fraction of Retrieved Documents')
    plt.ylabel('Precision')
    legend = plt.legend(loc='upper right', shadow=True)
    plt.savefig(save_file)
    plt.show()
Project: KATE    Author: hugochan
def plot_info_retrieval_by_length(precisions, save_file):
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "^", "x", "D"]
    ticks = zip(*zip(*precisions)[1][0])[0]
    plt.xticks(range(len(ticks)), ticks)
    new_x = interpolate.interp1d(ticks, range(len(ticks)))(ticks)

    i = 0
    for model_name, val in precisions:
        fr, pr = zip(*val)
        plt.plot(new_x, pr, linestyle='-', alpha=0.6, marker=markers[i],
                        markersize=6, label=model_name)
        i += 1
        # plt.legend(model_name)
    plt.xlabel('Document Sorted by Length')
    plt.ylabel('Precision (%)')
    legend = plt.legend(loc='upper right', shadow=True)
    plt.savefig(save_file)
    plt.show()
Project: MulensModel    Author: rpoleski
def _store_plot_properties(
            self, color_list=None, marker_list=None, size_list=None,
            label_list=None, **kwargs):
        """
        Store plot properties for each data set.
        """
        if color_list is not None:
            self.plot_properties['color_list'] = color_list
        if marker_list is not None:
            self.plot_properties['marker_list'] = marker_list
        if size_list is not None:
            self.plot_properties['size_list'] = size_list
        if label_list is not None:
            self.plot_properties['label_list'] = label_list
        if len(kwargs) > 0:
            self.plot_properties['other_kwargs'] = kwargs
Project: MLPractices    Author: carefree0910
def draw_results(self):
        metrics_log, cost_log = {}, {}
        for key, value in sorted(self._logs.items()):
            metrics_log[key], cost_log[key] = value[:-1], value[-1]

        for i, name in enumerate(sorted(self._metric_names)):
            plt.figure()
            plt.title("Metric Type: {}".format(name))
            for key, log in sorted(metrics_log.items()):
                xs = np.arange(len(log[i])) + 1
                plt.plot(xs, log[i], label="Data Type: {}".format(key))
            plt.legend(loc=4)
            plt.show()
            plt.close()

        plt.figure()
        plt.title("Cost")
        for key, loss in sorted(cost_log.items()):
            xs = np.arange(len(loss)) + 1
            plt.plot(xs, loss, label="Data Type: {}".format(key))
        plt.legend()
        plt.show()
Project: MLPractices    Author: carefree0910
def get_graphs_from_logs():
        with open("Results/logs.dat", "rb") as file:
            logs = pickle.load(file)
        for (hus, ep, bt), log in logs.items():
            hus = list(map(lambda _c: str(_c), hus))
            title = "hus: {} ep: {} bt: {}".format(
                "- " + " -> ".join(hus) + " -", ep, bt
            )
            fb_log, acc_log = log["fb_log"], log["acc_log"]
            xs = np.arange(len(fb_log)) + 1
            plt.figure()
            plt.title(title)
            plt.plot(xs, fb_log)
            plt.plot(xs, acc_log, c="g")
            plt.savefig("Results/img/" + "{}_{}_{}".format(
                "-".join(hus), ep, bt
            ))
            plt.close()
Project: pytorch.rl.learning    Author: moskomule
def main(plot=True, env_name='CartPole-v0'):
    print("start training")
    ac = ActorCritic(env_name)

    # training
    ac()

    print("testing")
    ac.test(render=False)
    ac.test(render=False)
    ac.test(render=False)

    if plot:
        import matplotlib.pyplot as plt

        plt.plot(ac.rewards)
        plt.show()
Project: pytorch.rl.learning    Author: moskomule
def main(plot=True, env_name='CartPole-v0'):
    print("start training")
    rf = RFBaseline(env_name)

    # training
    rf()

    print("testing")
    rf.test(render=False)
    rf.test(render=False)
    rf.test(render=False)

    if plot:
        import matplotlib.pyplot as plt
        plt.plot(rf.rewards)
        plt.show()
Project: pytorch.rl.learning    Author: moskomule
def main(plot=True, env_name='CartPole-v0'):
    print("start training")
    rf = REINFORCE(env_name)

    # training
    rf()

    print("testing")
    rf.test(render=False)
    rf.test(render=False)
    rf.test(render=False)

    if plot:
        import matplotlib.pyplot as plt
        plt.plot(rf.rewards)
        plt.show()
Project: pytorch.rl.learning    Author: moskomule
def main(plot=True, env_name="Taxi-v2", test_init_state=77):
    print("start training")
    sarsa9 = Sarsa(env_name, alpha=0.9)
    sarsa5 = Sarsa(env_name, alpha=0.5)
    sarsa1 = Sarsa(env_name, alpha=0.1)

    # training
    sarsa9()
    sarsa5()
    sarsa1()

    print("testing")
    print("gamma=0.9")
    sarsa9.test(test_init_state)
    print("gamma=0.5")
    sarsa5.test(test_init_state)
    print("gamma=0.1")
    sarsa1.test(test_init_state)

    if plot:
        plt.plot(sarsa1.rewards, label="alpha=0.1", alpha=0.5)
        plt.plot(sarsa5.rewards, label="alpha=0.5", alpha=0.5)
        plt.plot(sarsa9.rewards, label="alpha=0.9", alpha=0.5)
        plt.legend()
        plt.show()
Project: pytorch.rl.learning    Author: moskomule
def main(plot=True, env_name="Taxi-v2", test_init_state=77):
    print("start training")
    ql9 = QLearing(env_name, alpha=0.9)
    ql5 = QLearing(env_name, alpha=0.5)
    ql1 = QLearing(env_name, alpha=0.1)
    # training
    ql9()
    ql5()
    ql1()
    ql9.test(test_init_state)
    ql5.test(test_init_state)
    ql1.test(test_init_state)

    if plot:
        import matplotlib.pyplot as plt

        plt.plot(ql1.rewards, label="alpha=0.1", alpha=0.5)
        plt.plot(ql5.rewards, label="alpha=0.5", alpha=0.5)
        plt.plot(ql9.rewards, label="alpha=0.9", alpha=0.5)
        plt.legend()
        plt.show()
Project: structured-output-ae    Author: sbelharbi
def plot_errors(self, valid, train, path, epoc=-1):
        '''Plot and save the figure of the error over the validation and train sets computed during the gradient descent (classification).

        The figure is saved but not displayed.
        valid: list of errors over the validation set
        train: list of errors over the train set
        path: path where to save the figure.
        '''
        fig = plt.figure()
        train_gp, = plt.plot(train, '-r')
        valid_gp, = plt.plot(valid, '-*g')
        if epoc >= 0:
            epoc = epoc - 1 # ploting starts from 0
            stop, = plt.plot([epoc, epoc], [0, max(valid + train) + 5], '--b', lw=2)
            plt.legend([train_gp, valid_gp, stop], ['train error', 'valid error', 'stop learning, epoch='+str(epoc + 1)], fancybox=True, shadow=True)
        else:
            plt.legend([train_gp, valid_gp], ['train error', 'valid error'], fancybox=True, shadow=True)

        plt.title('Train/valid error during the gradient descent')
        plt.xlabel(u"n° epoch")
        plt.ylabel('Error (100 - accuracy) %')
        fig.savefig(path, bbox_inches='tight')
        # to display the figure
        #plt.show()
Project: deep-summarization    Author: harpribot
def plot_one_metric(self, models_metric, title):
        """

        :param models_metric:
        :param title:
        :return:
        """
        for index, model_metric in enumerate(models_metric):
            plt.plot(self.steps, model_metric, label=self.file_desc[index])
        plt.title(title)
        plt.legend()
        plt.xlabel('Number of batches')
        plt.ylabel('Score')
Project: lang-reps    Author: chaitanyamalaviya
def plot_trajectories(src_sent, src_encoding, idx):

    # encoding is (time_steps, hidden_dim)
    #pca = PCA(n_components=1)

    #pca_result = pca.fit_transform(src_encoding)
    times = np.arange(src_encoding.shape[0])
    plt.plot(times, src_encoding)
    plt.title(" ".join(src_sent))
    plt.xlabel('timestep')
    plt.ylabel('trajectories')
    plt.savefig("misc_hidden_cell_trajectories_"+str(idx), bbox_inches="tight")
    plt.close()
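A hedged example call of plot_trajectories, using a fabricated hidden-state matrix of shape (time_steps, hidden_dim):

import numpy as np

src_sent = ['a', 'short', 'example']
src_encoding = np.random.randn(len(src_sent), 4)   # (time_steps, hidden_dim), invented
plot_trajectories(src_sent, src_encoding, idx=0)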
Project: DeepAnomaly    Author: adiyoss
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)
    X = create_sequences(db, maxlen, maxlen)
    y = create_sequences(db, maxlen, maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], 1))
    y = np.reshape(y, (y.shape[0], y.shape[1], 1))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    model.add(LSTM(hidden_size, input_shape=(maxlen, input_size)))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(maxlen))
    # The decoder RNN could be multiple layers stacked or a single layer
    model.add(LSTM(hidden_size, return_sequences=True))

    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(1)))

    model.load_weights(save_dir + model_name)

    model.compile(loss='mae', optimizer='adam')
    model.summary()

    prediction = model.predict(X, batch_size, verbose=1, )
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()
    plt.plot(prediction.flatten()[:4000], label='prediction')
    plt.plot(y.flatten()[maxlen:4000 + maxlen], label='true')
    plt.legend()
    plt.show()

    store_prediction_and_ground_truth(model)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def show_pca(X, sentences):
    plt.figure()
    plt.plot(X[:,0], X[:,1], 'x')

    for x, sentence in zip(X, sentences):
        plt.text(x[0]-0.01, x[1]-0.01, sentence, horizontalalignment='center', verticalalignment='top')

    plt.show()
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def show_pca(X, sentences):
    plt.figure()
    plt.plot(X[:,0], X[:,1], 'x')

    for x, sentence in zip(X, sentences):
        plt.text(x[0]+0.01, x[1]-0.01, sentence, horizontalalignment='left', verticalalignment='top')

    plt.show()
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def model_choices():
    # no attention: model 43
    # full: model 19
    # no grammar/full : model 19

    # no attention/no grammar, +grammar, +attention, full model
    train = [89.09, 89.16, 90.47, 90.49]
    dev = [45.7, 45.8, 55.5, 56.1] 
    test = [40.2, 40.4, 56, 56.6]
    train_recall = [82.30, 82.35, 90.04, 90.05]
    dev_recall = [62.62, 62.63, 76.76, 77.78]
    test_recall = [59.43, 60.37, 69.8, 70.75]

    #plt.newfigure()

    X = 1 + np.arange(4)
    plt.plot(X, train_recall, '--')#, color='#85c1e5')
    plt.plot(X, train, '--x')#, color='#6182a6')
    plt.plot(X, dev_recall, '-+')#
    plt.plot(X, dev, '-o')#
    plt.plot(X, test_recall, '-^')#, color='#6182a6')
    plt.plot(X, test, '-')#, color='#052548')

    plt.ylim(0, 100)
    plt.xlim(0.5, 4.5)

    plt.xticks(X, ["Seq2Seq", "+ Grammar", "+ Attention", "Full Model"])
    plt.tight_layout()

    plt.legend(["Train recall", "Train accuracy", "Dev recall", "Dev accuracy", "Test recall", "Test accuracy"], loc='lower right')
    plt.savefig('./figures/model-choices.pdf')
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def learning():
    with open('./data/train-stats.json', 'r') as fp:
        data = np.array(json.load(fp), dtype=np.float32)

    loss = data[:,0]
    train_acc = 100*data[:,1]
    dev_acc = 100*data[:,2]

    dev_mov_avg = movingaverage(dev_acc, 3)

    X = 1 + np.arange(len(data))
    plt.xlim(0, len(data)+1)

    #plt.plot(X, loss)
    #plt.ylabel('Loss')
    plt.xlabel('Training epoch', fontsize=20)

    #plt.gca().twinx()
    plt.plot(X, train_acc)
    plt.plot(X, dev_acc)
    plt.plot(X[1:-1], dev_mov_avg, '--')
    #plt.ylabel('Accuracy')
    plt.ylim(0, 100)

    plt.tight_layout()
    plt.legend(["Train Accuracy", "Dev Accuracy"], loc="lower right")
    plt.savefig('./figures/learning.pdf')
Project: pylspm    Author: lseman
def PCAdo(block, name):
    cor_ = np.corrcoef(block.T)
    eig_vals, eig_vecs = np.linalg.eig(cor_)
    tot = sum(eig_vals)
    var_exp = [(i / tot) * 100 for i in sorted(eig_vals, reverse=True)]
    cum_var_exp = np.cumsum(var_exp)
    loadings = (eig_vecs * np.sqrt(eig_vals))

    eig_vals = np.sort(eig_vals)[::-1]
    print('Eigenvalues')
    print(eig_vals)
    print('Variance Explained')
    print(var_exp)
    print('Total Variance Explained')
    print(cum_var_exp)
    print('Loadings')
    print(abs(loadings[:, 0]))

    PAcorrect = PA(block.shape[0], block.shape[1])

    print('Parallel Analisys')
    pa = (eig_vals - (PAcorrect - 1))
    print(pa)

    print('Correlation Matrix')
    print(pd.DataFrame.corr(block))

    plt.plot(range(1,len(pa)+1), pa, '-o')
    plt.grid(True)
    plt.xlabel('Fatores')
    plt.ylabel('Componentes')

    plt.savefig('imgs/PCA' + name, bbox_inches='tight')
    plt.clf()
    plt.cla()
#    plt.show()