Python numpy module: savetxt() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.savetxt().
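Before the project examples, here is a minimal, self-contained sketch of a typical call; the array, filename, and formatting options are illustrative choices, not taken from any project below:

import numpy as np

# A small 2-D array to write; savetxt accepts 1-D and 2-D arrays.
data = np.array([[1.0, 2.0], [3.0, 4.0]])

# One row per line, comma-separated, each value formatted with %.4f,
# plus a header line ('#'-prefixed by default).
np.savetxt('example.csv', data, fmt='%.4f', delimiter=',', header='x,y')

# Round trip: loadtxt skips '#' comment lines automatically.
loaded = np.loadtxt('example.csv', delimiter=',')
assert np.allclose(data, loaded)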

Project: melanoma-transfer    Author: learningtitans
def calc_auc(y_pred_proba, labels, exp_run_folder, classifier, fold):

    auc = roc_auc_score(labels, y_pred_proba)
    fpr, tpr, thresholds = roc_curve(labels, y_pred_proba)
    curve_roc = np.array([fpr, tpr])
    datafile_id = open(exp_run_folder+'/data/roc_{}_{}.txt'.format(classifier, fold), 'w+')
    np.savetxt(datafile_id, curve_roc)
    datafile_id.close()
    plt.plot(fpr, tpr, label='ROC curve: AUC={0:0.2f}'.format(auc))
    plt.xlabel('1-Specificity')
    plt.ylabel('Sensitivity')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.title('ROC Fold {}'.format(fold))
    plt.legend(loc="lower left")
    plt.savefig(exp_run_folder+'/data/roc_{}_{}.pdf'.format(classifier, fold), format='pdf')
    return auc
Project: youtube-8m    Author: wangheda
def main():
    files = tf.gfile.Glob(flags.FLAGS.src_path_1)
    labels_uni = np.zeros([4716,1])
    labels_matrix = np.zeros([4716,4716])
    for file in files:
        labels_all = get_video_input_feature(file)
        print(len(labels_all[0][2]),len(labels_all[0][3]),len(labels_all[0][4]),len(labels_all[0][5]))
        """
        for labels in labels_all:
            for i in range(len(labels)):
                labels_uni[labels[i]] += 1
                for j in range(len(labels)):
                    labels_matrix[labels[i],labels[j]] += 1
    labels_matrix = labels_matrix/labels_uni
    labels_matrix = labels_matrix/(np.sum(labels_matrix,axis=0)-1.0)
    for i in range(4716):
        labels_matrix[i,i] = 1.0
    np.savetxt('labels_uni.out', labels_uni, delimiter=',')
    np.savetxt('labels_matrix.out', labels_matrix, delimiter=',')"""
Project: pybot    Author: spillai
def _write(self, stream, text, byte_order):
        '''
        Write the data to a PLY file.

        '''
        if self._have_list:
            # There are list properties, so serialization is
            # slightly complicated.
            if text:
                self._write_txt(stream)
            else:
                self._write_bin(stream, byte_order)
        else:
            # no list properties, so serialization is
            # straightforward.
            if text:
                _np.savetxt(stream, self.data, '%.18g', newline='\r\n')
            else:
                data = self.data.astype(self.dtype(byte_order),
                                        copy=False)
                data.tofile(stream)
Project: NeoAnalysis    Author: neoanalysis
def write_segment(self, segment,
                  delimiter='\t',
                  skiprows=0,
                  writetimecolumn=True,
                  ):
        """
        Write a segment and its AnalogSignals to a text file.

        **Arguments**
            delimiter : column delimiter in the file: '\t', one space,
                        two spaces, ',' or ';'
            writetimecolumn : True or False; write the time vector as
                        the first column
        """
        if skiprows:
            raise NotImplementedError('skiprows values other than 0 are not '
                                      'supported')
        l = []
        if writetimecolumn:
            l.append(segment.analogsignals[0].times[:, np.newaxis])
        for anaSig in segment.analogsignals:
            l.append(anaSig.magnitude[:, np.newaxis])
        sigs = np.concatenate(l, axis=1)
        np.savetxt(self.filename, sigs, delimiter=delimiter)
Project: mbin    Author: fanglab
def transpose_contig_matrix( args ):
    contig  = args[0]
    opts    = args[1]
    logging.info("  Transposing %s" % contig)
    contig_ipds_fn       = os.path.join( opts.tmp, "%s_ipds.tmp"       % contig)
    contig_ipds_kmers_fn = os.path.join( opts.tmp, "%s_ipdskmers.tmp"  % contig)
    contig_ipds_N_fn     = os.path.join( opts.tmp, "%s_ipdsN.tmp"      % contig)
    contig_ipds          = np.loadtxt(contig_ipds_fn,       dtype="float")
    contig_ipds_kmers    = np.loadtxt(contig_ipds_kmers_fn, dtype="str")
    contig_ipds_N        = np.loadtxt(contig_ipds_N_fn,     dtype="int")
    if len(contig_ipds.shape)==1:
        contig_ipds   = contig_ipds.reshape(1,contig_ipds.shape[0])
        contig_ipds_N = contig_ipds_N.reshape(1,contig_ipds_N.shape[0])

    contig_ipds    = contig_ipds.T
    contig_ipds_N  = contig_ipds_N.T
    np.savetxt(contig_ipds_fn+".trans",   contig_ipds,   fmt="%.4f", delimiter="\t")
    np.savetxt(contig_ipds_N_fn+".trans", contig_ipds_N, fmt="%s",   delimiter="\t")
    return None
Project: iFruitFly    Author: AdnanMuhib
def writeToCSV(_imat, _nameOfFile):
    _rows, _columns = _imat.shape
    _array = []
    # Alternatives kept from the original source:
    #_imat.tofile(_nameOfFile + ".csv", sep = ',', format = '%10.5f')
    #np.savetxt(_nameOfFile + ".csv", _imat, delimiter = ",")
    for i in range(0, _rows):
        for j in range(0, _columns):
            # one output row per matrix cell: (row, column, value)
            _array.append([i, j, _imat[i][j]])
    writeCSVFile(_array, _nameOfFile + ".csv")
    #np.savetxt(_nameOfFile + ".csv", _array, delimiter = ",", fmt = '%10.5f')
    return
Project: atoolbox    Author: liweitianux
def save(self, outfile):
        if self.ps1d_normalized:
            ps1d_desc = "normalized power [K^2]"
        else:
            ps1d_desc = "power [K^2 Mpc^3]"
        header = [
            "EoR window definition:",
            "+ FoV: %f [deg]" % self.ps2d.fov,
            "+ e_ConvWidth: %f" % self.ps2d.e,
            "+ k_perp_min: %f [Mpc^-1]" % self.ps2d.k_perp_min,
            "+ k_perp_max: %f [Mpc^-1]" % self.ps2d.k_perp_max,
            "+ k_los_min: %f [Mpc^-1]" % self.ps2d.k_los_min,
            "+ k_los_max: %f [Mpc^-1]" % self.ps2d.k_los_max,
            "",
            "Columns:",
            "1. k: wavenumber [Mpc^-1]",
            "2. ps1d: %s" % ps1d_desc,
            "ps1d_err: power errors",
            "",
            "k   ps1d   ps1d_err",
        ]
        np.savetxt(outfile, self.ps1d, header="\n".join(header))
        print("Saved 1D power spectrum to file: %s" % outfile)
Project: atoolbox    Author: liweitianux
def save(self, outfile):
        data = self.psd1d
        header = [
            "pixel: %s [%s]" % self.pixel,
            "frequency: [%s^-1]" % self.pixel[1],
        ]
        if self.meanstd:
            header += [
                "psd1d: *mean* powers of radial averaging annuli",
                "psd1d_err: *standard deviation*",
            ]
        else:
            header += [
                "psd1d: *median* powers of radial averaging annuli",
                "psd1d_err: 1.4826*MAD (median absolute deviation)",
            ]
        header += [
            "n_cells: number of averaging cells",
            "",
            "frequency   psd1d   psd1d_err   n_cells"
        ]
        np.savetxt(outfile, data, header="\n".join(header))
        print("Saved PSD data to: %s" % outfile)
Project: latplan    Author: guicho271828
def dump_all_actions(ae,configs,trans_fn,name="all_actions.csv",repeat=1):
    if 'dump' not in mode:
        return
    l = len(configs)
    batch = 5000
    loop = (l // batch) + 1
    try:
        print(ae.local(name))
        with open(ae.local(name), 'wb') as f:
            for i in range(repeat):
                for begin in range(0,loop*batch,batch):
                    end = begin + batch
                    print((begin,end,len(configs)))
                    transitions = trans_fn(configs[begin:end])
                    orig, dest = transitions[0], transitions[1]
                    orig_b = ae.encode_binary(orig,batch_size=1000).round().astype(int)
                    dest_b = ae.encode_binary(dest,batch_size=1000).round().astype(int)
                    actions = np.concatenate((orig_b,dest_b), axis=1)
                    np.savetxt(f,actions,"%d")
    except AttributeError:
        print("this AE does not support dumping")
    except KeyboardInterrupt:
        print("dump stopped")
Project: latplan    Author: guicho271828
def dump_all_states(ae,configs,states_fn,name="all_states.csv",repeat=1):
    if 'dump' not in mode:
        return
    l = len(configs)
    batch = 5000
    loop = (l // batch) + 1
    try:
        print(ae.local(name))
        with open(ae.local(name), 'wb') as f:
            for i in range(repeat):
                for begin in range(0,loop*batch,batch):
                    end = begin + batch
                    print((begin,end,len(configs)))
                    states = states_fn(configs[begin:end])
                    states_b = ae.encode_binary(states,batch_size=1000).round().astype(int)
                    np.savetxt(f,states_b,"%d")
    except AttributeError:
        print("this AE does not support dumping")
    except KeyboardInterrupt:
        print("dump stopped")
Project: latplan    Author: guicho271828
def dump_states(ae,states,name="states.csv",repeat=1):
    if 'dump' not in mode:
        return
    try:
        print(ae.local(name))
        with open(ae.local(name), 'wb') as f:
            for i in range(repeat):
                np.savetxt(f,ae.encode_binary(states,batch_size=1000).round().astype(int),"%d")
    except AttributeError:
        print("this AE does not support dumping")
    except KeyboardInterrupt:
        print("dump stopped")
Project: discretize    Author: simpeg
def writeModelUBC(mesh, fileName, model):
        """Writes a model associated with a TensorMesh
        to a UBC-GIF format model file.

        :param string fileName: File to write to
        :param numpy.ndarray model: The model
        """

        # Reshape model to a matrix
        modelMat = mesh.r(model, 'CC', 'CC', 'M')
        # Transpose the axes
        modelMatT = modelMat.transpose((2, 0, 1))
        # Flip z to positive down
        modelMatTR = utils.mkvc(modelMatT[::-1, :, :])

        np.savetxt(fileName, modelMatTR.ravel())
Project: keras-molecules    Author: maxhodak
def encoder(args, model):
    latent_dim = args.latent_dim
    data, charset = load_dataset(args.data, split = False)

    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size = latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)

    x_latent = model.encoder.predict(data)
    if args.save_h5:
        h5f = h5py.File(args.save_h5, 'w')
        h5f.create_dataset('charset', data = charset)
        h5f.create_dataset('latent_vectors', data = x_latent)
        h5f.close()
    else:
        np.savetxt(sys.stdout, x_latent, delimiter = '\t')
Project: keras-molecules    Author: maxhodak
def main():
    args = get_arguments()
    model = MoleculeVAE()

    data, data_test, charset = load_dataset(args.data)

    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size = args.latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)

    x_latent = model.encoder.predict(data)
    if not args.visualize:
        if not args.save_h5:
            np.savetxt(sys.stdout, x_latent, delimiter = '\t')
        else:
            h5f = h5py.File(args.save_h5, 'w')
            h5f.create_dataset('charset', data = charset)
            h5f.create_dataset('latent_vectors', data = x_latent)
            h5f.close()
    else:
        visualize_latent_rep(args, model, x_latent)
Project: PyFunt    Author: dnlcrl
def export_histories(self, path):
        if not os.path.exists(path):
            os.makedirs(path)
        i = np.arange(len(self.loss_history)) + 1
        z = np.array(list(zip(i, i*self.batch_size, self.loss_history)))
        np.savetxt(os.path.join(path, 'loss_history.csv'), z, delimiter=',', fmt=[
                   '%d', '%d', '%f'], header='iteration, n_images, loss')

        i = np.arange(len(self.train_acc_history), dtype=int)

        z = np.array(list(zip(i, self.train_acc_history)))
        np.savetxt(os.path.join(path, 'train_acc_history.csv'), z, delimiter=',', fmt=[
            '%d', '%f'], header='epoch, train_acc')

        z = np.array(list(zip(i, self.val_acc_history)))
        np.savetxt(os.path.join(path, 'val_acc_history.csv'), z, delimiter=',', fmt=[
            '%d', '%f'], header='epoch, val_acc')
        np.save(os.path.join(path, 'loss'), self.loss_history)
        np.save(os.path.join(path, 'train_acc_history'), self.train_acc_history)
        np.save(os.path.join(path, 'val_acc_history'), self.val_acc_history)
Project: radar    Author: amoose136
def test_format(self):
        a = np.array([(1, 2), (3, 4)])
        c = BytesIO()
        # Sequence of formats
        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
        c.seek(0)
        assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

        # A single multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Specify delimiter; it should be overridden by the multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Bad fmt, should raise a ValueError
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, a, fmt=99)
Project: radar    Author: amoose136
def test_usecols(self):
        # Test the selection of columns
        # Select 1 column
        control = np.array([[1, 2], [3, 4]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=(1,))
        assert_equal(test, control[:, 1])
        #
        control = np.array([[1, 2, 3], [3, 4, 5]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
        assert_equal(test, control[:, 1:])
        # Testing with arrays instead of tuples.
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
        assert_equal(test, control[:, 1:])
Project: uncover-ml    Author: GeoscienceAustralia
def create_scatter_plot(outfile_results, config):
    true_vs_pred = os.path.join(config.output_dir,
                                config.name + "_results.csv")
    true_vs_pred_plot = os.path.join(config.output_dir,
                                     config.name + "_results.png")
    with hdf.open_file(outfile_results, 'r') as f:
        prediction = f.get_node("/", "Prediction").read()
        y_true = f.get_node("/", "y_true").read()
        np.savetxt(true_vs_pred, X=np.vstack([y_true, prediction]).T,
                   delimiter=',')
        plt.figure()
        plt.scatter(y_true, prediction)
        plt.title('true vs prediction')
        plt.xlabel('True')
        plt.ylabel('Prediction')
        plt.savefig(true_vs_pred_plot)
Project: PPRE    Author: MaoYuwei
def WordToVec(bet_list):
    model = Word2Vec.load('word2vector.model')
    # Turn each "between" phrase into a vector
    bet_vec_list = []  # one vector per "between" phrase
    for bet in bet_list:
        bet = bet.strip()
        num, line = bet.split(':', 1)
        line = line.split()
        between_vec = np.array([0] * 50)  # running sum of 50-dim word vectors
        for word in line:
            if word in model:
                between_vec = between_vec + np.array(model[word])

        bet_vec_list.append(between_vec)
    bet_vec_list = np.array(bet_vec_list)
    np.savetxt('2.csv', bet_vec_list)
    return bet_vec_list
Project: FaceRecognitionProjects    Author: ForrestPi
def compar_pic(path1,path2):
    global net
    # Load the first image
    X=read_image(path1)
    test_num=np.shape(X)[0]
    # Forward the batch X through the network
    out = net.forward_all(data = X)
    # Use the fc7 layer activations as the feature vector
    feature1 = np.float64(out['fc7'])
    feature1=np.reshape(feature1,(test_num,4096))
    #np.savetxt('feature1.txt', feature1, delimiter=',')

    # Load the second image
    X=read_image(path2)
    out = net.forward_all(data=X)
    feature2 = np.float64(out['fc7'])
    feature2=np.reshape(feature2,(test_num,4096))
    #np.savetxt('feature2.txt', feature2, delimiter=',')
    # Cosine similarity between the two feature vectors;
    # larger values mean more similar faces
    predicts=pw.cosine_similarity(feature1, feature2)
    return  predicts
Project: ngraph    Author: NervanaSystems
def run(self):
        """
        extract and resize images then write manifest files to disk.
        """
        cfg_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')
        log_file = os.path.join(self.orig_out_dir, 'train.log')
        manifest_list_cfg = ', '.join([k + ':' + v for k, v in self.manifests.items()])

        with open(cfg_file, 'w') as f:
            f.write('manifest = [{}]\n'.format(manifest_list_cfg))
            f.write('manifest_root = {}\n'.format(self.out_dir))
            f.write('log = {}\n'.format(log_file))
            f.write('epochs = 90\nrng_seed = 0\nverbose = True\neval_freq = 1\n')

        for setn, manifest in self.manifests.items():
            if not os.path.exists(manifest):
                pairs = self.train_or_val_pairs(setn)
                records = [(os.path.relpath(fname, self.out_dir),
                            os.path.relpath(self._target_filename(int(tgt)), self.out_dir))
                           for fname, tgt in pairs]
                np.savetxt(manifest, records, fmt='%s,%s')
Project: bifrost    Author: ledatelescope
def main(self, input_ring):
        """Initiate the writing to filename
        @param[in] input_rings First ring in this list will be used for
            data
        @param[out] output_rings This list of rings won't be used."""
        span_generator = self.iterate_ring_read(input_ring)
        data_accumulate = None
        for span in span_generator:
            if self.nbit < 8:
                unpacked_data = unpack(span.data_view(self.dtype), self.nbit)
            else:
                if self.dtype == np.complex64:
                    unpacked_data = span.data_view(self.dtype).view(np.float32)
                elif self.dtype == np.complex128:
                    unpacked_data = span.data_view(self.dtype).view(np.float64)
                else:
                    unpacked_data = span.data_view(self.dtype)
            if data_accumulate is not None:
                data_accumulate = np.concatenate((data_accumulate, unpacked_data[0]))
            else:
                data_accumulate = unpacked_data[0]
        text_file = open(self.filename, 'a')
        np.savetxt(text_file, data_accumulate.reshape((1, -1)))
        text_file.close()
Project: pslab-desktop-apps    Author: fossasia
def saveData(self):
        try:
            os.mkdir(self.savedir)
        except OSError:
            print('directory exists. overwriting')
        print('saving to ', self.savedir)

        if self.calibrateOnlyADC: # create ideal dataset for PV1, PV2
            np.savetxt(os.path.join(self.savedir,'PV1_ERR.csv'),np.column_stack([np.linspace(-5,5,4096),np.linspace(-5,5,4096) ]))
            np.savetxt(os.path.join(self.savedir,'PV2_ERR.csv'),np.column_stack([np.linspace(-3.3,3.3,4096),np.linspace(-3.3,3.3,4096) ]))
        else:
            np.savetxt(os.path.join(self.savedir,'PV1_ERR.csv'),np.column_stack([self.A.ADC24['AIN5'],self.A.DAC_VALS['PV1'] ]))
            np.savetxt(os.path.join(self.savedir,'PV2_ERR.csv'),np.column_stack([self.A.ADC24['AIN6'],self.A.DAC_VALS['PV2'] ]))

        np.savetxt(os.path.join(self.savedir,'PV3_ERR.csv'),np.column_stack([self.A.ADC24['AIN7'],self.A.DAC_VALS['PV3'] ]))


        np.savetxt(os.path.join(self.savedir,'CALIB_INL.csv'),np.column_stack([self.A.ADC24['AIN7'],self.A.ADCPIC_INL]))
        for a in self.INPUTS:
            if self.I.analogInputSources[a].gainEnabled:
                for b in range(8):
                    raw=self.A.ADC_VALUES[a][b]
                    np.savetxt(os.path.join(self.savedir,'CALIB_%s_%dx.csv'%(a,self.I.gain_values[b])),np.column_stack([np.array(self.A.ADC24['AIN6'])[self.A.ADC_ACTUALS[a][b]],raw]))
            else:
                np.savetxt(os.path.join(self.savedir,'CALIB_%s_%dx.csv'%(a,1)),np.column_stack([np.array(self.A.ADC24['AIN6'])[self.A.ADC_ACTUALS[a][0]],self.A.ADC_VALUES[a][0]]))
Project: l1l2py    Author: slipguru
def main():
    import sys

    num_samples = int(sys.argv[1])
    num_variables = int(sys.argv[2])

    if num_variables < 9:
        raise ValueError('at least 9 variables are needed')

    print 'Generation of %d samples with %d variables...' % (num_samples,
                                                             num_variables),

    X, Y = correlated_dataset(num_samples, num_variables, (5, 5, 5), [1.0]*15)
    np.savetxt('data.txt', X)
    np.savetxt('labels.txt', Y)

    print 'done'
Project: papers    Author: jeffheaton
def dump(self, base):
        header = ",".join(["x" + str(x) for x in range(1, 1 + self.X_train.shape[1])])
        header += ","
        header += ",".join(["y" + str(x) for x in range(1, 1 + self.y_train_nn.shape[1])])

        np.savetxt(base + "_train.csv",
                   np.hstack((self.X_train, self.y_train_nn)),
                   fmt='%10.5f', delimiter=',', header=header, comments="")

        np.savetxt(base + "_validate.csv",
                   np.hstack((self.X_validate, self.y_validate_nn)),
                   fmt='%10.5f', delimiter=',', header=header, comments="")

        np.savetxt(base + "_train_norm.csv",
                   np.hstack((self.X_train_norm, self.y_train_nn)),
                   fmt='%10.5f', delimiter=',', header=header, comments="")

        np.savetxt(base + "_validate_norm.csv",
                   np.hstack((self.X_validate_norm, self.y_validate_nn)),
                   fmt='%10.5f', delimiter=',', header=header, comments="")


Project: GPS    Author: golsun
def save_raw_csv(raw, soln, dir_csv):
    np.savetxt(os.path.join(dir_csv,str(raw['axis0_type'])+'.csv'), raw['axis0'])
    np.savetxt(os.path.join(dir_csv,'pressure.csv'), raw['pressure'])
    np.savetxt(os.path.join(dir_csv,'temperature.csv'), raw['temperature'])
    np.savetxt(os.path.join(dir_csv,'mole_fraction.csv'), raw['mole_fraction'], delimiter=',')
    np.savetxt(os.path.join(dir_csv,'net_reaction_rate.csv'), raw['net_reaction_rate'], delimiter=',')
    if 'speed' in raw.keys():
        np.savetxt(os.path.join(dir_csv,'speed.csv'), raw['speed'], delimiter=',')

    with open(os.path.join(dir_csv,'species_list.csv'),'w') as f:
        for sp in soln.species_names:
            f.write(sp+'\n')

    with open(os.path.join(dir_csv,'reaction_list.csv'),'w') as f:
        for rxn in soln.reaction_equations():
            f.write(rxn+'\n')
Project: RFHO    Author: lucfra
def save_adjacency_matrix_for_gephi(matrix, name, root_dir=None, notebook_mode=True, class_names=None):
    if root_dir is None: root_dir = os.getcwd()
    directory = check_or_create_dir(join_paths(root_dir, FOLDER_NAMINGS['GEPHI_DIR']),
                                    notebook_mode=notebook_mode)
    filename = join_paths(directory, '%s.csv' % name)

    m, n = np.shape(matrix)
    assert m == n, '%s should be a square matrix.' % name
    if not class_names:
        class_names = [str(k) for k in range(n)]

    left = np.array([class_names]).T
    matrix = np.hstack([left, matrix])
    up = np.vstack([[''], left]).T
    matrix = np.vstack([up, matrix])

    np.savetxt(filename, matrix, delimiter=';', fmt='%s')
Project: pythonml    Author: nicholastoddsmith
def GenerateData(nf = 256, ns = 16384):
    try:    #Try to read data from file
        A = np.loadtxt('bdatA.csv', delimiter = ',')
        Y = np.loadtxt('bdatY.csv', delimiter = ',').reshape(-1, 1)
    except OSError:     #New data needs to be generated
        x = np.linspace(-1, 1, num = ns).reshape(-1, 1)
        A = np.concatenate([x] * nf, axis = 1)
        Y = ((np.sum(A, axis = 1) / nf) ** 2).reshape(-1, 1)
        A = (A + np.random.rand(ns, nf)) / (2.0)
        np.savetxt('bdatA.csv', A, delimiter = ',')
        np.savetxt('bdatY.csv', Y, delimiter = ',')
    return (A, Y)

#R:     Regressor network to use
#A:     The sample data matrix
#Y:     Target data matrix
#nt:    Number of times to divide the sample matrix
#fn:    File name to write results
Project: pythonml    Author: nicholastoddsmith
def MakeBenchDataFeature(R, A, Y, nt, fn):
    #Divide the feature columns into nt pieces; for each i, run the benchmark with chunks 0, 1, ..., i
    step = A.shape[1] // nt
    TT = np.zeros((nt, 3))
    for i in range(1, nt):
        #Number of features
        TT[i, 0] = len(range(0, (i * step)))
        print('{:8d} feature benchmark.'.format(int(TT[i, 0])))
        #Training and testing times respectively
        TT[i, 1], TT[i, 2] = RunBenchmark(R, A[:, 0:(i * step)], Y[:, 0:(i * step)])
    #Save benchmark data to csv file
    np.savetxt(fn, TT, delimiter = ',', header = 'Features,Train,Test')

#R:     Regressor network to use
#A:     The sample data matrix
#Y:     Target data matrix
#nt:    Number of times to divide the sample matrix
#fn:    File name to write results
Project: DW-POSSUM    Author: marksgraham
def makeRotMatrix(motionParams, simDirClusterDirection):
  #Make three rotation matrices
  call(["makerot", "-t", str(motionParams[4]), "-a", "1,0,0", "--cov="+simDirClusterDirection+ "/brain.nii.gz", "-o", "rotx.mat"])
  call(["makerot", "-t", str(motionParams[5]), "-a", "0,1,0", "--cov="+simDirClusterDirection+ "/brain.nii.gz", "-o", "roty.mat"])
  call(["makerot", "-t", str(motionParams[6]), "-a", "0,0,1", "--cov="+simDirClusterDirection+ "/brain.nii.gz", "-o", "rotz.mat"])
  #Concatenate
  call(["convert_xfm", "-omat", "rotxy.mat","-concat", "roty.mat", "rotx.mat"])
  call(["convert_xfm", "-omat", "rotxyz.mat","-concat", "rotz.mat", "rotxy.mat"])

  #Add translations
  rot = np.loadtxt('rotxyz.mat')
  rot[0,3] += motionParams[1]
  rot[1,3] += motionParams[2]
  rot[2,3] += motionParams[3]
  np.savetxt('trans.mat', rot )
  #Tidy up
  call(["rm","rotx.mat","roty.mat","rotz.mat","rotxy.mat","rotxyz.mat",])
Project: tensorflow_end2end_speech_recognition    Author: hirofumi0810
def plot_loss(train_losses, dev_losses, steps, save_path):
    """Save history of training & dev loss as figure.
    Args:
        train_losses (list): train losses
        dev_losses (list): dev losses
        steps (list): steps
        save_path (string): directory in which to save the csv and the figure
    """
    # Save as csv file
    loss_graph = np.column_stack((steps, train_losses, dev_losses))
    if os.path.isfile(os.path.join(save_path, "loss.csv")):
        os.remove(os.path.join(save_path, "loss.csv"))
    np.savetxt(os.path.join(save_path, "loss.csv"), loss_graph, delimiter=",")

    # TODO: error check for inf loss

    # Plot & save as png file
    plt.clf()
    plt.plot(steps, train_losses, color="blue", label="Train")
    plt.plot(steps, dev_losses, color="orange", label="Dev")
    plt.xlabel('step', fontsize=12)
    plt.ylabel('loss', fontsize=12)
    plt.legend(loc="upper right", fontsize=12)
    if os.path.isfile(os.path.join(save_path, "loss.png")):
        os.remove(os.path.join(save_path, "loss.png"))
    plt.savefig(os.path.join(save_path, "loss.png"), dpi=500)
Project: graynet    Author: raamana
def save(weight_vec, out_dir, subject, str_suffix=None):
    "Saves the features to disk."

    if out_dir is not None:
        # get outpath returned from hiwenet, based on dist name and all other parameters
        # choose out_dir name  based on dist name and all other parameters
        out_subject_dir = pjoin(out_dir, subject)
        if not pexists(out_subject_dir):
            os.mkdir(out_subject_dir)

        if str_suffix is not None:
            out_file_name = '{}_graynet.csv'.format(str_suffix)
        else:
            out_file_name = 'graynet.csv'

        out_weights_path = pjoin(out_subject_dir, out_file_name)

        try:
            np.savetxt(out_weights_path, weight_vec, fmt='%.5f')
            print('\nSaved the features to \n{}'.format(out_weights_path))
        except Exception:
            print('\nUnable to save features to {}'.format(out_weights_path))
            traceback.print_exc()

    return
Project: holographic_memory    Author: jramapuram
def encode(sess, memory, encoder, values, keys, full_batch_host, keys_host, batch_size):
    full_batch_size = full_batch_host.shape[0]
    assert full_batch_size >= batch_size, "full batch size needs to be >= mini-batch size"
    memories_host = np.zeros([memory.num_models, memory.input_size])
    print 'full_batch_size = ', full_batch_size, 'minibatch_size = ', batch_size

    for begin,end in zip(range(0, full_batch_size, batch_size),
                         range(batch_size, full_batch_size+1, batch_size)):
        feed_dict={keys: keys_host[begin:end],
                   values: full_batch_host[begin:end]}

        # encode value with the keys
        memories_host += sess.run(encoder, feed_dict=feed_dict)

    #np.savetxt("encoded.csv", memories_host, delimiter=",")
    return memories_host
Project: bag-of-concepts    Author: hank110
def create_boc_w2v_train(doc_path,dim,win,freq,num_concept):
    '''
    Creates the (word, concept) result for the given dimensions, window,
    min-frequency threshold and numbers of concepts.
    Trains new W2V models along the way.
    '''
    all_param=[]
    for edim in dim:
        model=train_w2v(doc_path,edim,win,freq)
        wlist=get_tokens(doc_path,freq)
        wM=get_wordvectors(model,wlist)
        for ecp in num_concept:
            w2c_output="w2c_d%s_w%s_mf%s_c%s.csv" %(str(edim),str(win),str(freq),str(ecp))
            boc_output="boc_d%s_w%s_mf%s_c%s.csv" %(str(edim),str(win),str(freq),str(ecp))
            word2concept=create_concepts(wM,wlist,w2c_output,num_concept)
            boc=apply_cfidf(doc_path,word2concept,num_concept)
            np.savetxt(boc_output, boc, delimiter=",")
            print(".... BOC vectors created in %s" %boc_output)
            all_param.append(namedtuple('parameters','document_path dimension window_size min_freq num_concept'))
    return all_param
Project: bag-of-concepts    Author: hank110
def create_boc_w2v_load(models,doc_path,win,freq,num_concept,model_path):
    '''
    Creates the (word, concept) result for the given models, window,
    min-frequency threshold and numbers of concepts.
    Loads pre-trained W2V models instead of training new ones.
    '''
    all_param=[]
    for em in models:
        em_name=em.split("/")[-1]
        model=KeyedVectors.load_word2vec_format(em)
        wlist=get_tokens(doc_path,freq)
        wM=get_wordvectors(model,wlist)
        for ecp in num_concept:
            w2c_output="w2c_d%s_w%s_mf%s_c%s.csv" %(str(em_name),str(win),str(freq),str(ecp))
            boc_output="boc_d%s_w%s_mf%s_c%s.csv" %(str(em_name),str(win),str(freq),str(ecp))
            word2concept=create_concepts(wM,wlist,w2c_output,num_concept)
            boc=apply_cfidf(doc_path,word2concept,num_concept)
            np.savetxt(boc_output, boc, delimiter=",")
            print(".... BOC vectors created in %s" %boc_output)
            all_param.append(namedtuple('parameters','document_path dimension window_size min_freq num_concept'))
    return all_param
Project: PySCUBA    Author: GGiecold
def write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers):

    processed_data_path = path.join(output_directory, 'processed_data.tsv')

    with open(processed_data_path, 'w') as f:
        f.write('\t'.join(cell_IDs))
        f.write('\n')
        f.write('\t'.join(cell_stages))
        f.write('\n')
        np.savetxt(f, data.T, fmt = '%.6f', delimiter = '\t')

    dataset = np.genfromtxt(processed_data_path, delimiter = '\t', dtype = str)
    dataset = np.insert(dataset, 0, np.append(['Cell ID', 'Stage'], 
        markers), axis = 1)

    with open(processed_data_path, 'w') as f:
        np.savetxt(f, dataset, fmt = '%s', delimiter = '\t')
Project: elfi    Author: elfi-dev
def prepare_inputs(*inputs, **kwinputs):
    """Prepare the inputs for the simulator.

    The signature follows that given in `elfi.tools.external_operation`. This function
    appends kwinputs with unique and descriptive filenames and writes an input file for
    the bdm executable.
    """
    alpha, delta, tau, N = inputs
    meta = kwinputs['meta']

    # Organize the parameters to an array. The broadcasting works nicely with constant
    # arguments.
    param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))

    # Prepare a unique filename for parallel settings
    filename = '{model_name}_{batch_index}_{submission_index}.txt'.format(**meta)
    np.savetxt(filename, param_array, fmt='%.4f %.4f %.4f %d')

    # Add the filenames to kwinputs
    kwinputs['filename'] = filename
    kwinputs['output_filename'] = filename[:-4] + '_out.txt'

    # Return new inputs that the command will receive
    return inputs, kwinputs
Project: RecursiveHierarchicalClustering    Author: xychang
def run(self):
        # pr = cProfile.Profile()
        print '[LOG]: start new thread '+str(self.threadID)
        curTime = time.time()
        distM = self.matrix[self.sfrom].dot(
                    self.matrix[self.sto].T).todense()
        distM = np.maximum(
            np.arccos(np.minimum(distM, np.ones(distM.shape))) /
            (PI_VALUE/200)-0.01,
            np.zeros(distM.shape)).astype(np.int8)

        # np.savetxt(self.fo, distM, fmt = '%d')
        np.save(self.fo + '.npy', distM)
        print('[LOG]: thread %d finished after %d' %
              (self.threadID, time.time() - curTime))

        # self.pr.disable()
        # # sortby = 'cumulative'
        # # pstats.Stats(pr).strip_dirs().sort_stats(sortby).print_stats()
        # self.pr.print_stats()
Project: krpcScripts    Author: jwvanderbeck
def test_format(self):
        a = np.array([(1, 2), (3, 4)])
        c = BytesIO()
        # Sequence of formats
        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
        c.seek(0)
        assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

        # A single multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Specify delimiter; it should be overridden by the multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Bad fmt, should raise a ValueError
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, a, fmt=99)
Project: youtube-8m    Author: wangheda
def main():
    labels_uni = np.zeros([4716,1])
    with open(flags.FLAGS.src_path_1, "rt", encoding='utf-8') as csvfile:
        spamreader = csv.reader(csvfile)
        line_num = 0
        for row in spamreader:
            line_num += 1
            print('the '+str(line_num)+'th file is processing')
            if line_num==1:
                continue
            lbs = row[1].split()
            for i in range(0,len(lbs),2):
                labels_uni[int(lbs[i])] += 1
    np.savetxt('labels_model.out', labels_uni, delimiter=',')
Project: youtube-8m    Author: wangheda
def main():
    rootclass = {}
    with open(flags.FLAGS.src_path_1, "rt", encoding='utf-8') as csvfile:
        spamreader = csv.reader(csvfile)
        line_num = 0
        for row in spamreader:
            line_num += 1
            print('the '+str(line_num)+'th file is processing')
            if line_num==1:
                continue
            if row[5] in rootclass:
                rootclass[row[5]].append(line_num-2)
            else:
                rootclass[row[5]] = [line_num-2]
    labels_ordered = []
    for x in rootclass:
        labels_ordered.extend(rootclass[x])
    labels_ordered = [int(l) for l in labels_ordered]
    reverse_ordered = np.zeros([4716,1])
    for i in range(len(labels_ordered)):
        reverse_ordered[labels_ordered[i]] = i
    print(len(rootclass))
    print(labels_ordered)
    np.savetxt('labels_ordered.out', reverse_ordered, delimiter=',')
    random.shuffle(labels_ordered)
    reverse_unordered = np.zeros([4716,1])
    for i in range(len(labels_ordered)):
        reverse_unordered[labels_ordered[i]] = i
    print(labels_ordered)
    np.savetxt('labels_unordered.out', reverse_unordered, delimiter=',')
    labels_class = np.zeros([len(rootclass),4716])
    flag = 0
    for x in rootclass:
        for i in rootclass[x]:
            labels_class[flag,i] = 1
        flag +=1

    np.savetxt('labels_class.out', labels_class)
Project: pointnet    Author: charlesq34
def _write_txt(self, stream):
        '''
        Save a PLY element to an ASCII-format PLY file.  The element may
        contain list properties.

        '''
        for rec in self.data:
            fields = []
            for prop in self.properties:
                fields.extend(prop._to_fields(rec[prop.name]))

            _np.savetxt(stream, [fields], '%.18g', newline='\r\n')
Project: pybot    Author: spillai
def _write_txt(self, stream):
        '''
        Save a PLY element to an ASCII-format PLY file.  The element may
        contain list properties.

        '''
        for rec in self.data:
            fields = []
            for prop in self.properties:
                fields.extend(prop._to_fields(rec[prop.name]))

            _np.savetxt(stream, [fields], '%.18g', newline='\r\n')
Project: pybot    Author: spillai
def save_poses(fn, poses): 
    """ Save poses in toon format """ 
    Rts = [pose.matrix[:3,:] for pose in poses]
    with open(fn, 'w') as outfile:
        for Rt in Rts:
            for row in Rt:
                np.savetxt(outfile, row, fmt='%-8.7f', delimiter=' ', newline=' ')
                outfile.write('\n')
            outfile.write('\n')
    return
Project: NeoAnalysis    Author: neoanalysis
def write_test_file(self, variable='v', check=False):
        data, metadata = self.build_test_data(variable)
        with open(self.test_file, 'wb') as f:
            for item in sorted(metadata.items()):
                f.write(("# %s = %s\n" % item).encode('utf8'))
            np.savetxt(f, data)
        if check:
            raise NotImplementedError
Project: NeoAnalysis    Author: neoanalysis
def _write_file_contents(self, data, metadata):
        with open(self.filename, 'wb') as f:
            for item in sorted(metadata.items()):
                f.write(("# %s = %s\n" % item).encode('utf8'))
            numpy.savetxt(f, data)