Python matplotlib.pyplot module: step() example source code

The following 27 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.step().
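
Before the project examples, a minimal self-contained sketch of the basic call (the data are invented for illustration): matplotlib.pyplot.step(x, y) draws a piecewise-constant line through the points, and the where keyword ('pre', 'post', or 'mid'; default 'pre') controls on which side of each x value the level changes.

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical data: a cumulative count over ten sample points.
x = np.arange(10)
y = np.cumsum(np.random.rand(10))

plt.step(x, y, where='post', label='cumulative value')  # level changes to the right of each x
plt.xlabel('index')
plt.ylabel('cumulative value')
plt.legend(loc='best')
plt.show()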

Project: BayesVP    Author: cameronliang
def continuum_test():

    import numpy as np
    import matplotlib.pyplot as plt
    import sys
    from Config import DefineParams
    config_fname = sys.argv[1]
    obs_spec = DefineParams(config_fname)    
    obs_spec.print_config_params()

    a0 = 0.1
    a1 = 0.2
    a2 = 0.3
    alpha = np.array([14,50,0.0,a0,a1,a2])
    model_flux = continuum_model_flux(alpha,obs_spec)
    x = (obs_spec.wave - np.median(obs_spec.wave)) / obs_spec.wave
    #plt.step(x, model_flux, 'k')
    # cm_km and c are constants defined elsewhere in the BayesVP module
    v = cm_km * c * (obs_spec.wave - np.median(obs_spec.wave)) / obs_spec.wave
    plt.step(v,model_flux,'b')
    plt.step(v,obs_spec.flux,'k')
    plt.savefig('./temp.png')
Project: spyking-circus-ort    Author: spyking-circus
def plot_cum_dist_ipis(self, train, t_min=None, t_max=None, d_min=0.0, d_max=200.0, ax=None, **kwargs):
        """Plot cumulative distribution of IPIs"""

        d_min = d_min * 1e-3  # convert from ms to s
        d_max = d_max * 1e-3  # convert from ms to s
        ipis = self.compute_ipis(train, t_min=t_min, t_max=t_max)
        y_min = np.sum(ipis <= d_min)
        ipis = ipis[d_min < ipis]
        ipis = ipis[ipis <= d_max]
        x = np.unique(ipis)
        y = np.array([y_min + np.sum(ipis <= e) for e in x])
        x = np.insert(x, 0, [d_min])
        y = np.insert(y, 0, [y_min])
        x = np.append(x, [d_max])
        y = np.append(y, y[-1])

        if ax is None:
            plt.style.use('seaborn-paper')
            plt.figure()
            ax = plt.gca()
            ax.set_xlabel("duration (ms)")
            ax.set_ylabel("number")
        ax.step(1e+3 * x, y, where='post', **kwargs)

        return
Project: spyking-circus-ort    Author: spyking-circus
def plot_cum_dist_isis(self, train, t_min=None, t_max=None, d_min=0.0, d_max=200.0, ax=None, **kwargs):
        """Plot cumulative distribution of ISIs"""

        d_min = d_min * 1e-3  # convert from ms to s
        d_max = d_max * 1e-3  # convert from ms to s
        isis = self.compute_isis(train, t_min=t_min, t_max=t_max)
        y_min = np.sum(isis <= d_min)
        isis = isis[d_min < isis]
        isis = isis[isis <= d_max]
        x = np.unique(isis)
        y = np.array([y_min + np.sum(isis <= e) for e in x])
        x = np.insert(x, 0, [d_min])
        y = np.insert(y, 0, [y_min])
        x = np.append(x, [d_max])
        y = np.append(y, y[-1])

        if ax is None:
            plt.style.use('seaborn-paper')
            plt.figure()
            ax = plt.gca()
            ax.set_xlabel("duration (ms)")
            ax.set_ylabel("number")
        ax.step(1e+3 * x, y, where='post', **kwargs)

        return
Project: spyking-circus-ort    Author: spyking-circus
def plot_cum_dist_ipis(self, train, t_min=None, t_max=None, d_min=0.0, d_max=200.0, ax=None, **kwargs):
        """Plot cumulative distribution of IPIs"""

        d_min = d_min * 1e-3  # convert from ms to s
        d_max = d_max * 1e-3  # convert from ms to s
        ipis = self.compute_ipis(train, t_min=t_min, t_max=t_max)
        y_min = np.sum(ipis <= d_min)
        ipis = ipis[d_min < ipis]
        ipis = ipis[ipis <= d_max]
        x = np.unique(ipis)
        y = np.array([y_min + np.sum(ipis <= e) for e in x])
        x = np.insert(x, 0, [d_min])
        y = np.insert(y, 0, [y_min])
        x = np.append(x, [d_max])
        y = np.append(y, y[-1])

        if ax is None:
            plt.style.use('seaborn-paper')
            plt.figure()
            ax = plt.gca()
            ax.set_xlabel("duration (ms)")
            ax.set_ylabel("number")
        ax.step(1e+3 * x, y, where='post', **kwargs)

        return
Project: spyking-circus-ort    Author: spyking-circus
def plot_cum_dist_isis(self, train, t_min=None, t_max=None, d_min=0.0, d_max=200.0, ax=None, **kwargs):
        """Plot cumulative distribution of ISIs"""

        d_min = d_min * 1e-3  # convert from ms to s
        d_max = d_max * 1e-3  # convert from ms to s
        isis = self.compute_isis(train, t_min=t_min, t_max=t_max)
        y_min = np.sum(isis <= d_min)
        isis = isis[d_min < isis]
        isis = isis[isis <= d_max]
        x = np.unique(isis)
        y = np.array([y_min + np.sum(isis <= e) for e in x])
        x = np.insert(x, 0, [d_min])
        y = np.insert(y, 0, [y_min])
        x = np.append(x, [d_max])
        y = np.append(y, y[-1])

        if ax is None:
            plt.style.use('seaborn-paper')
            plt.figure()
            ax = plt.gca()
            ax.set_xlabel("duration (ms)")
            ax.set_ylabel("number")
        ax.step(1e+3 * x, y, where='post', **kwargs)

        return
Project: spyking-circus-ort    Author: spyking-circus
def plot_cum_dist_isis(self, train, t_min=None, t_max=None, d_min=0.0, d_max=200.0, ax=None, **kwargs):
        """Plot cumulative distribution of ISIs"""

        d_min = d_min * 1e-3  # convert from ms to s
        d_max = d_max * 1e-3  # convert from ms to s
        isis = self.compute_isis(train, t_min=t_min, t_max=t_max)
        y_min = np.sum(isis <= d_min)
        isis = isis[d_min < isis]
        isis = isis[isis <= d_max]
        x = np.unique(isis)
        y = np.array([y_min + np.sum(isis <= e) for e in x])
        x = np.insert(x, 0, [d_min])
        y = np.insert(y, 0, [y_min])
        x = np.append(x, [d_max])
        y = np.append(y, y[-1])

        if ax is None:
            plt.style.use('seaborn-paper')
            plt.figure()
            ax = plt.gca()
            ax.set_xlabel("duration (ms)")
            ax.set_ylabel("number")
        ax.step(1e+3 * x, y, where='post', **kwargs)

        return
Project: BayesVP    Author: cameronliang
def plot_model_comparison(self,redshift,dv,central_wave=None):
        """
        Plot best fit model onto spectrum for visual inspection 
        """
        c = 299792.458  # speed of light [km/s]

        if central_wave is None:
            # Use the first transition as the central wavelength
            central_wave = self.config_param.transitions_params_array[0][0][0][1]
        else:
            central_wave = float(central_wave)

        obs_spec_wave = self.config_param.wave / (1+redshift) 
        obs_spec_dv = c*(obs_spec_wave - central_wave) / central_wave
        plt.rc('text', usetex=True)

        plt.figure(1)
        plt.step(obs_spec_dv,self.config_param.flux,'k',label=r'$\rm Data$')
        plt.step(obs_spec_dv,self.model_flux,'b',lw=2,label=r'$\rm Best\,Fit$')
        plt.step(obs_spec_dv,self.config_param.dflux,'r')
        plt.axhline(1,ls='--',c='g',lw=1.2)
        plt.axhline(0,ls='--',c='g',lw=1.2)
        plt.ylim([-0.1,1.4])
        plt.xlim([-dv,dv])
        plt.xlabel(r'$dv\,[\rm km/s]$')
        plt.ylabel(r'$\rm Normalized\,Flux$')
        plt.legend(loc=3)

        output_name = self.config_param.processed_product_path + '/modelspec_' + self.config_param.chain_short_fname + '.pdf' 
        plt.savefig(output_name,bbox_inches='tight',dpi=100)
        plt.clf()
        print('Written %s' % output_name)
Project: BayesVP    Author: cameronliang
def produce_simplespec(wave_begin,wave_end,dv,logN,b,z):

    import matplotlib.pyplot as plt
    wave = WavelengthArray(wave_begin,wave_end,dv)

    flux = simple_spec(logN,b,z,wave,'H','I')

    plt.step(wave,flux)
    plt.xlim([1214,1218])
    plt.ylim([0,1.3])
    plt.savefig('./temp.png')
Project: BayesVP    Author: cameronliang
def plot_spec(self):
        import matplotlib.pyplot as pl
        pl.step(self.wave,self.flux,color='k')
        pl.step(self.wave,self.dflux,color='r')
Project: CAAPR    Author: Stargrazer82301
def plot_allocation(self):

        """
        Plot the total memory usage of the root process together with its
        cumulative array allocation, and save the figure.
        :return:
        """

        # Determine the path to the plot file
        plot_path = fs.join(self.output_path, "allocation.pdf")

        # Initialize figure
        plt.figure()
        plt.clf()

        # Plot the memory usage of the root process
        plt.plot(self.data[0].times, self.data[0].memory, label="total memory usage")

        # Plot the memory allocation of the root process
        plt.step(self.allocation.times, self.allocation.cumulative, where="post", linestyle="--", label="allocated array memory")

        # Set the axis labels
        plt.xlabel("Time (s)", fontsize='large')
        plt.ylabel("Memory usage (GB)", fontsize='large')

        # Set the plot title
        plt.title("Memory (de)allocation")

        # Set the legend
        plt.legend(loc='lower right', prop={'size': 8})

        # Save the figure
        plt.savefig(plot_path, bbox_inches='tight', pad_inches=0.25)
        plt.close()

# -----------------------------------------------------------------
Project: CAAPR    Author: Stargrazer82301
def plot_allocation(self):

        """
        Plot the total memory usage of the root process together with its
        cumulative array allocation, and save the figure.
        :return:
        """

        # Determine the path to the plot file
        plot_path = fs.join(self.output_path, "allocation.pdf")

        # Initialize figure
        plt.figure()
        plt.clf()

        # Plot the memory usage of the root process
        plt.plot(self.data[0].times, self.data[0].memory, label="total memory usage")

        # Plot the memory allocation of the root process
        plt.step(self.allocation.times, self.allocation.cumulative, where="post", linestyle="--", label="allocated array memory")

        # Set the axis labels
        plt.xlabel("Time (s)", fontsize='large')
        plt.ylabel("Memory usage (GB)", fontsize='large')

        # Set the plot title
        plt.title("Memory (de)allocation")

        # Set the legend
        plt.legend(loc='lower right', prop={'size': 8})

        # Save the figure
        plt.savefig(plot_path, bbox_inches='tight', pad_inches=0.25)
        plt.close()

# -----------------------------------------------------------------
Project: augment3D    Author: yulkang
def ecdf(sample=None):
    import numpy as np
    import statsmodels.api as sm
    import matplotlib.pyplot as plt

    if sample is None:
        sample = np.random.uniform(0, 1, 50)

    f = sm.distributions.ECDF(sample)
    x = np.unique(sample)
    y = f(x)
    plt.step(x, y, where='post')
    plt.ylim((0,1))
    plt.show()
Project: py-control    Author: cheind
def target(self, t):
        """Return setpoint position for particle to reach.
        Simple step function: the setpoint steps at t == 5. and t == 15.
        """
        if t < 5. or t >= 15.:
            return np.asarray([0.])
        else:
            return np.array([1.])
Project: py-control    Author: cheind
def run():

    # Various PID controller parameters to run simulation with 
    pid_params = [
        dict(kp=0.1, ki=0., kd=0.),
        dict(kp=1.5, ki=0., kd=0.5),
    ]

    # Additionally tune PID parameters
    params = ctrl.tune_twiddle(params=dict(kp=0., ki=0., kd=0.), costfunction=runner, eps=0.001)
    pid_params.append(params)

    # Run simulation for each set of PID params
    handles = []
    for idx, c in enumerate(pid_params):
        process = MoveParticleProcess(particle=ctrl.Particle(x0=[0], v0=[0], inv_mass=1.), pid=ctrl.PID(**c))
        result = process.loop(tsim=100, dt=0.1)

        if idx == 0:
            fh, = plt.step(result['t'], result['y'], label='target')    
            handles.append(fh)    

        xh, = plt.plot(result['t'], result['x'], label='pid kp {:.2f} kd {:.2f} ki {:.2f}'.format(c['kp'], c['kd'], c['ki']))
        handles.append(xh)

    plt.title('Particle trajectory')
    plt.legend(handles=handles, loc=1)
    plt.xlabel('Time $sec$')
    plt.ylabel('Position $m$')
    plt.show()
Project: plasma    Author: jnkh
def hist_alarms(self,alarms,title_str='alarms',save_figure=False):
        T_min_warn = self.T_min_warn
        T_max_warn = self.T_max_warn
        if len(alarms) > 0:
            alarms = alarms / 1000.0
            alarms = sort(alarms)
            T_min_warn /= 1000.0
            T_max_warn /= 1000.0
            figure()
            alarms += 0.0001
            bins=logspace(log10(min(alarms)),log10(max(alarms)),40)
            #bins=linspace(min(alarms),max(alarms),100)
            #        hist(alarms,bins=bins,alpha=1.0,histtype='step',normed=True,log=False,cumulative=-1)
            #
            pyplot.step(np.concatenate((alarms[::-1], alarms[[0]])), 1.0*np.arange(alarms.size+1)/(alarms.size))

            gca().set_xscale('log')
            axvline(T_min_warn,color='r')
            axvline(T_max_warn,color='r')
            xlabel('TTD [s]')
            ylabel('Accumulated fraction of detected disruptions')
            xlim([1e-4,max(alarms)*10])
            ylim([0,1])
            grid()
            title(title_str)
            show()
            if save_figure:
                savefig('accum_disruptions.png',bbox_inches='tight')
        else:
            print(title_str + ": No alarms!")
Project: single-cell-classification    Author: whuTommy
def test_softmax(seq,targets,n_epochs=250,n_seq=182):
    """ Test RNN with softmax outputs. """

    length = len(seq)
    real=0
    for k in range(0, length):
        n_hidden = 10
        n_in = 40
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step
        np.random.seed(0)

        train_sample = copy.deepcopy(seq)
        train_lable = copy.deepcopy(targets)

        test_sample = seq[k]

        train_sample = np.delete(train_sample, k, 0)
        train_lable = np.delete(train_lable, k, 0)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        learning_rate=0.001, learning_rate_decay=0.999,
                        n_epochs=n_epochs, activation='sigmoid',
                        output_type='softmax', use_symbolic_softmax=False)

        model.fit(train_sample, train_lable, validation_frequency=1000)

        guess = model.predict_proba(test_sample)
        tmp_list = np.ndarray.tolist(guess.T)
        if (tmp_list.index(max(tmp_list)) == targets[k][0]):
            print k,True
            real+=1
        else:
            print k,False
    print 1.0*real / n_seq
Project: single-cell-classification    Author: whuTommy
def test_softmax(seq,targets,n_epochs=250,n_seq=182):
    """ Test RNN with softmax outputs. """

    length = len(seq)
    real=0
    for k in range(0, length):
        n_hidden = 10
        n_in = 40
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step
        np.random.seed(0)

        train_sample = copy.deepcopy(seq)
        train_lable = copy.deepcopy(targets)

        test_sample = seq[k]

        train_sample = np.delete(train_sample, k, 0)
        train_lable = np.delete(train_lable, k, 0)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        learning_rate=0.001, learning_rate_decay=0.999,
                        n_epochs=n_epochs, activation='sigmoid',
                        output_type='softmax', use_symbolic_softmax=False)

        model.fit(train_sample, train_lable, validation_frequency=1000)

        guess = model.predict_proba(test_sample)
        tmp_list = np.ndarray.tolist(guess.T)
        if (tmp_list.index(max(tmp_list)) == targets[k][0]):
            print k,True
            real+=1
        else:
            print k,False
    print 1.0*real / n_seq
Project: python-machine-learning-book    Author: jeremyn
def plot_manual_pca_transformation(X, y):
    cov_mat = np.cov(X.T)
    eigenvalues, eigenvectors = np.linalg.eig(cov_mat)
    print("\nEigenvalues \n%s" % eigenvalues)

    tot = sum(eigenvalues)
    var_exp = [i/tot for i in sorted(eigenvalues, reverse=True)]
    cum_var_exp = np.cumsum(var_exp)

    plt.bar(
        range(1, 14),
        var_exp,
        alpha=0.5,
        align='center',
        label='individual explained variance',
    )
    plt.step(
        range(1, 14),
        cum_var_exp,
        where='mid',
        label='cumulative explained variance',
    )
    plt.ylabel('Explained variance ratio')
    plt.xlabel('Principal components')
    plt.legend(loc='best')
    plt.show()

    eigenpairs = [
        (np.abs(eigenvalue), eigenvectors[:, index])
        for index, eigenvalue
        in enumerate(eigenvalues)
    ]
    eigenpairs.sort(reverse=True)

    w = np.hstack((
        eigenpairs[0][1][:, np.newaxis],
        eigenpairs[1][1][:, np.newaxis],
    ))
    print('Matrix W:\n%s\n' % w)

    X_pca = X.dot(w)

    colors = ['r', 'b', 'g']
    markers = ['s', 'x', 'o']
    for label, color, marker in zip(np.unique(y), colors, markers):
        plt.scatter(
            X_pca[y == label, 0],
            X_pca[y == label, 1],
            c=color,
            label=label,
            marker=marker,
        )

    plt.xlabel('PC 1')
    plt.ylabel('PC 2')
    plt.legend(loc='lower left')
    plt.show()

    print(X_pca[0])
Project: python-machine-learning-book    Author: jeremyn
def plot_sklearn_pca_with_lr(X_train, X_test, y_train, y_test):
    pca = PCA()
    pca.fit(X_train)
    print(pca.explained_variance_ratio_)

    plt.bar(
        range(1, 14),
        pca.explained_variance_ratio_,
        alpha=0.5,
        align='center',
    )
    plt.step(
        range(1, 14),
        np.cumsum(pca.explained_variance_ratio_),
        where='mid',
    )
    plt.ylabel('Explained variance ratio')
    plt.xlabel('Principal components')
    plt.show()

    pca = PCA(n_components=2)
    X_train_pca = pca.fit_transform(X_train)
    X_test_pca = pca.transform(X_test)

    plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
    plt.xlabel('PC 1')
    plt.ylabel('PC 2')
    plt.show()

    lr = LogisticRegression()
    lr = lr.fit(X_train_pca, y_train)

    plot_decision_regions(X_train_pca, y_train, classifier=lr)
    plt.xlabel('PC 1')
    plt.ylabel('PC 2')
    plt.legend(loc='lower left')
    plt.show()

    plot_decision_regions(X_test_pca, y_test, classifier=lr)
    plt.xlabel('PC 1')
    plt.ylabel('PC 2')
    plt.legend(loc='lower left')
    plt.show()
Project: spyking-circus-ort    Author: spyking-circus
def plot_signal_and_peaks(self, t_min=None, t_max=None, thold=1.0):
        """Plot signal and peaks"""

        # Retrieve signal data.
        path = self.signal_writer.data_path
        data = np.memmap(path, dtype=np.float32, mode='r')
        data = np.reshape(data, (-1, self.nb_electrodes))

        # Retrieve threshold data.
        mad_path = self.mad_writer.data_path
        mad_data = np.memmap(mad_path, dtype=np.float32, mode='r')
        mad_data = np.reshape(mad_data, (-1, self.nb_electrodes))

        if t_min is None:
            i_min = 0
        else:
            i_min = int(t_min * self.sampling_rate)
        if t_max is None:
            i_max = data.shape[0]
        else:
            i_max = int(t_max * self.sampling_rate) + 1

        plt.figure()
        # Compute scaling factor.
        y_scale = 0.0
        for k in range(0, self.nb_electrodes):
            y = data[i_min:i_max, k]
            y_scale = max(y_scale, 2.0 * np.amax(np.abs(y)))
        # Plot electrode signals.
        for k in range(0, self.nb_electrodes):
            y = data[i_min:i_max, k]
            y_offset = float(k)
            x = np.arange(i_min, i_max).astype(np.float32) / self.sampling_rate
            plt.plot(x, y / y_scale + y_offset, c='C0', zorder=1)
        # Plot MADs.
        for k in range(0, self.nb_electrodes):
            mads = mad_data[:, k]
            i = np.arange(0, mads.size) * self.chunk_size
            x = i.astype(np.float32) / self.sampling_rate
            mask = np.array([t_min <= t <= t_max for t in x])
            x = x[mask]
            y = thold * mads[mask]
            y_offset = float(k)
            plt.step(x, + y / y_scale + y_offset, where='post', c='C3')
            plt.step(x, - y / y_scale + y_offset, where='post', c='C3')
        # Plot generated peaks.
        x = [t for t in self.generated_peak_train if t_min <= t <= t_max]
        y = [-1.0 for _ in x]
        plt.scatter(x, y, c='C2', marker='|', zorder=2)
        # Plot detected peaks.
        detected_peak_trains = self.detected_peak_trains
        for k in range(0, self.nb_electrodes):
            x = [t for t in detected_peak_trains[k] if t_min <= t <= t_max]
            y = [float(k) for _ in x]
            plt.scatter(x, y, c='C1', marker='|', zorder=2)
        plt.xlabel("time (s)")
        plt.ylabel("electrode")
        plt.tight_layout()
        plt.show()

        return
Project: spyking-circus-ort    Author: spyking-circus
def plot_signal_and_peaks(self, t_min=None, t_max=None, thold=1.0):
        """Plot signal and peaks"""

        # Retrieve signal data.
        path = self.signal_writer_kwargs['data_path']
        data = np.memmap(path, dtype=np.float32, mode='r')
        data = np.reshape(data, (-1, self.nb_channels))

        # Retrieve threshold data.
        mad_path = self.mad_writer_kwargs['data_path']
        mad_data = np.memmap(mad_path, dtype=np.float32, mode='r')
        mad_data = np.reshape(mad_data, (-1, self.nb_channels))

        if t_min is None:
            i_min = 0
        else:
            i_min = int(t_min * self.sampling_rate)
        if t_max is None:
            i_max = data.shape[0]
        else:
            i_max = int(t_max * self.sampling_rate) + 1

        plt.figure()
        # Compute scaling factor.
        y_scale = 0.0
        for k in range(0, self.nb_channels):
            y = data[i_min:i_max, k]
            y_scale = max(y_scale, 2.0 * np.amax(np.abs(y)))
        # Plot electrode signals.
        for k in range(0, self.nb_channels):
            y = data[i_min:i_max, k]
            y_offset = float(k)
            x = np.arange(i_min, i_max).astype(np.float32) / self.sampling_rate
            plt.plot(x, y / y_scale + y_offset, c='C0', zorder=1)
        # Plot MADs.
        for k in range(0, self.nb_channels):
            mads = mad_data[:, k]
            i = np.arange(0, mads.size) * self.chunk_size
            x = i.astype(np.float32) / self.sampling_rate
            mask = np.array([t_min <= t <= t_max for t in x])
            x = x[mask]
            y = thold * mads[mask]
            y_offset = float(k)
            plt.step(x, + y / y_scale + y_offset, where='post', c='C3')
            plt.step(x, - y / y_scale + y_offset, where='post', c='C3')
        # Plot generated peaks.
        x = [t for t in self.generated_peak_train if t_min <= t <= t_max]
        y = [-1.0 for _ in x]
        plt.scatter(x, y, c='C2', marker='|', zorder=2)
        # Plot detected peaks.
        detected_peak_trains = self.detected_peak_trains
        for k in range(0, self.nb_channels):
            x = [t for t in detected_peak_trains[k] if t_min <= t <= t_max]
            y = [float(k) for _ in x]
            plt.scatter(x, y, c='C1', marker='|', zorder=2)
        plt.xlabel("time (s)")
        plt.ylabel("electrode")
        plt.tight_layout()
        plt.show()

        return
Project: diffusion-maps    Author: jmbr
def plot_results(data: np.array, eigenvalues: np.array,
                 eigenvectors: np.array) -> None:
    """Plot results.

    Plots three figures. The first shows the modulus of the spectrum of the
    kernel in the diffusion map calculation. The second displays the original
    (2D) data colored by the value of each diffusion map. The third displays
    the data as transformed by the first two diffusion maps.

    Parameters
    ----------
    data : np.array
        Original (or downsampled) data set.
    eigenvalues : np.array
        Eigenvalues of the kernel matrix.
    eigenvectors : np.array
        Eigenvectors of the kernel matrix. The zeroth axis indexes each
        vector.

    """
    x = data[:, 0]
    y = data[:, 1]

    num_eigenvectors = max(eigenvectors.shape[0]-1, default.num_eigenpairs-1)

    plt.figure(1)
    plt.step(np.arange(1, eigenvalues.shape[0]), np.abs(eigenvalues[1:]))
    plt.xticks(range(1, eigenvalues.shape[0]))
    plt.xlabel('Eigenvalue index')
    plt.ylabel('| Eigenvalue |')
    plt.title('Eigenvalues')

    plt.figure(2)
    rows, cols = get_rows_and_columns(num_eigenvectors)
    for k in range(1, eigenvectors.shape[0]):
        plt.subplot(rows, cols, k)
        plt.scatter(x, y, c=eigenvectors[k, :], cmap='RdBu_r', rasterized=True)
        plt.xlabel('$x$')
        plt.ylabel('$y$')
        plt.axis('off')
        plt.title('$\\psi_{{{}}}$'.format(k))

    plt.figure(3)
    plt.scatter(eigenvectors[1, :], eigenvectors[2, :],
                color='black', alpha=0.5)
    plt.xlabel('$\\psi_1$')
    plt.ylabel('$\\psi_2$')
    plt.title('Data set in diffusion map space')

    # plt.tight_layout()
    plt.show()
Project: VideoGAN    Author: amartya18x
def test_binary(multiple_out=False, n_epochs=250):
    """ Test RNN with binary outputs. """
    n_hidden = 10
    n_in = 5
    if multiple_out:
        n_out = 2
    else:
        n_out = 1
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    # whether lag 1 (dim 3) is greater than lag 2 (dim 0)
    targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    if multiple_out:
        # whether product of lag 1 (dim 4) and lag 1 (dim 2)
        # is less than lag 2 (dim 0)
        targets[:, 2:, 1] = np.cast[np.int](
            (seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')

    model.fit(seq, targets, validation_frequency=1000)

    seqs = xrange(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')

        guess = model.predict_proba(seq[seq_num])
        guessed_targets = plt.step(xrange(n_steps), guess)
        plt.setp(guessed_targets, linestyle='--', marker='d')
        for i, x in enumerate(guessed_targets):
            x.set_color(true_targets[i].get_color())
        ax2.set_ylim((-0.1, 1.1))
        ax2.set_title('solid: true output, dashed: model output (prob)')
Project: VideoGAN    Author: amartya18x
def test_softmax(n_epochs=250):
    """ Test RNN with softmax outputs. """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes  # restricted to single softmax per time step

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps), dtype=np.int)

    thresh = 0.5
    # if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
    # class 1
    # if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
    # class 2
    # if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
    # class 0
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
    #targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh',
                    output_type='softmax', use_symbolic_softmax=False)

    model.fit(seq, targets, validation_frequency=1000)

    seqs = xrange(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)

        # blue line will represent true classes
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')

        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')
Project: single-cell-classification    Author: whuTommy
def test_binary(multiple_out=False, n_epochs=250):
    """ Test RNN with binary outputs. """
    n_hidden = 10
    n_in = 5
    if multiple_out:
        n_out = 2
    else:
        n_out = 1
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    # whether lag 1 (dim 3) is greater than lag 2 (dim 0)
    targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    if multiple_out:
        # whether product of lag 1 (dim 4) and lag 1 (dim 2)
        # is less than lag 2 (dim 0)
        targets[:, 2:, 1] = np.cast[np.int](
            (seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')

    model.fit(seq, targets, validation_frequency=1000)

    seqs = xrange(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')

        guess = model.predict_proba(seq[seq_num])
        guessed_targets = plt.step(xrange(n_steps), guess)
        plt.setp(guessed_targets, linestyle='--', marker='d')
        for i, x in enumerate(guessed_targets):
            x.set_color(true_targets[i].get_color())
        ax2.set_ylim((-0.1, 1.1))
        ax2.set_title('solid: true output, dashed: model output (prob)')
Project: single-cell-classification    Author: whuTommy
def test_binary(multiple_out=False, n_epochs=250):
    """ Test RNN with binary outputs. """
    n_hidden = 10
    n_in = 5
    if multiple_out:
        n_out = 2
    else:
        n_out = 1
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    # whether lag 1 (dim 3) is greater than lag 2 (dim 0)
    targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    if multiple_out:
        # whether product of lag 1 (dim 4) and lag 1 (dim 2)
        # is less than lag 2 (dim 0)
        targets[:, 2:, 1] = np.cast[np.int](
            (seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')

    model.fit(seq, targets, validation_frequency=1000)

    seqs = xrange(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')

        guess = model.predict_proba(seq[seq_num])
        guessed_targets = plt.step(xrange(n_steps), guess)
        plt.setp(guessed_targets, linestyle='--', marker='d')
        for i, x in enumerate(guessed_targets):
            x.set_color(true_targets[i].get_color())
        ax2.set_ylim((-0.1, 1.1))
        ax2.set_title('solid: true output, dashed: model output (prob)')
Project: single-cell-classification    Author: whuTommy
def test_binary(multiple_out=False, n_epochs=250):
    """ Test RNN with binary outputs. """
    n_hidden = 10
    n_in = 5
    if multiple_out:
        n_out = 2
    else:
        n_out = 1
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    # whether lag 1 (dim 3) is greater than lag 2 (dim 0)
    targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    if multiple_out:
        # whether product of lag 1 (dim 4) and lag 1 (dim 2)
        # is less than lag 2 (dim 0)
        targets[:, 2:, 1] = np.cast[np.int](
            (seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')

    model.fit(seq, targets, validation_frequency=1000)

    seqs = xrange(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')

        guess = model.predict_proba(seq[seq_num])
        guessed_targets = plt.step(xrange(n_steps), guess)
        plt.setp(guessed_targets, linestyle='--', marker='d')
        for i, x in enumerate(guessed_targets):
            x.set_color(true_targets[i].get_color())
        ax2.set_ylim((-0.1, 1.1))
        ax2.set_title('solid: true output, dashed: model output (prob)')