Python path module: Path() usage examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use path.Path().
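
Note that most of the snippets below use the Path class from the third-party path package (also known as path.py), a str subclass that bundles os.path, shutil, and glob operations, rather than the standard-library pathlib. A minimal sketch of the idioms that recur throughout the examples (the directory and file names here are placeholders):

from path import Path

root = Path("pages").joinpath("examples")     # compose paths with joinpath() or the "/" operator
root.makedirs_p()                             # create the directory tree; no error if it already exists
sample = root / "sample.txt"
sample.write_text("example content\n")        # write a small text file
print(sample.namebase, sample.ext)            # "sample" ".txt"
print([p.name for p in root.files("*.txt")])  # list files matching a glob pattern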

Project: typepy    Author: thombashi    | project source | file source
def main():
    maker = readmemaker.ReadmeMaker(PROJECT_NAME, OUTPUT_DIR)
    intro_root = Path(os.path.join("pages", "introduction"))

    maker.write_file(intro_root.joinpath("badges.txt"))
    maker.set_indent_level(0)

    maker.write_chapter("Summary")
    maker.write_file(intro_root.joinpath("summary.txt"))

    maker.write_chapter("Features")
    maker.write_file(intro_root.joinpath("features.txt"))

    write_examples(maker)

    maker.write_file(
        maker.doc_page_root_dir_path.joinpath("installation.rst"))

    maker.set_indent_level(0)
    maker.write_chapter("Documentation")
    maker.write_line_list([
        "http://{:s}.rtfd.io/".format(PROJECT_NAME),
    ])

    return 0
Project: pytablereader    Author: thombashi    | project source | file source
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Examples")

    examples_root = Path("pages").joinpath("examples")
    maker.inc_indent_level()

    maker.write_chapter("Load a CSV table")
    maker.write_file(examples_root.joinpath("load_csv.txt"))

    maker.write_chapter("Get loaded table data as pandas.DataFrame instance")
    maker.write_file(examples_root.joinpath("as_dataframe.txt"))

    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://{:s}.rtfd.io/en/latest/pages/examples/index.html".format(
            PROJECT_NAME),
    ])
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir, table_text, filename,
            table_name, expected_tabletuple_list):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        with open(file_path, "w") as f:
            f.write(table_text)

        loader = ptr.JsonTableFileLoader(file_path)
        #loader.table_name = table_name

        load = False
        for tabledata in loader.load():
            print("[actual]\n{}".format(ptw.dump_tabledata(tabledata)))

            assert tabledata in expected_tabletuple_list
            load = True

        assert load
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir, test_id, table_text, filename,
            table_name, expected_tabledata_list):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        with io.open(file_path, "w", encoding="utf-8") as f:
            f.write(table_text)

        loader = ptr.HtmlTableFileLoader(file_path)
        loader.table_name = table_name

        for tabledata, expected in zip(loader.load(), expected_tabledata_list):
            print("--- test {} ---".format(test_id))
            print("[expected]\n{}".format(ptw.dump_tabledata(expected)))
            print("[actual]\n{}".format(ptw.dump_tabledata(tabledata)))
            print("")

            assert tabledata == expected
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir, test_id, table_text, filename,
            table_name, expected_tabledata_list):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        with open(file_path, "w") as f:
            f.write(table_text)

        loader = ptr.MediaWikiTableFileLoader(file_path)
        loader.table_name = table_name

        load = False
        for tabledata, expected in zip(loader.load(), expected_tabledata_list):
            print("--- test {} ---".format(test_id))
            print("[tabledata]\n{}".format(tabledata))
            print("[expected]\n{}".format(expected))
            print("")
            assert tabledata == expected

            load = True

        assert load
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir, test_id, table_text, filename, expected):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        with io.open(file_path, "w", encoding="utf-8") as f:
            f.write(table_text)

        loader = ptr.LtsvTableFileLoader(file_path)

        for tabledata in loader.load():
            print("test-id={}".format(test_id))
            print("[expected]\n{}".format(ptw.dump_tabledata(expected)))
            print("[actual]\n{}".format(ptw.dump_tabledata(tabledata)))

            assert tabledata == expected
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir,
            test_id, table_text, filename, header_list, expected):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        with io.open(file_path, "w", encoding="utf-8") as f:
            f.write(table_text)

        loader = ptr.TsvTableFileLoader(file_path)
        loader.header_list = header_list

        for tabledata in loader.load():
            print("test-id={}".format(test_id))
            print(ptw.dump_tabledata(tabledata))

            assert tabledata in expected
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir, test_id, table_text, filename,
            table_name, expected_tabledata_list):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        with open(file_path, "w") as f:
            f.write(table_text)

        loader = ptr.MarkdownTableFileLoader(file_path)
        loader.table_name = table_name

        load = False
        for tabledata, expected in zip(loader.load(), expected_tabledata_list):
            print("--- test {} ---".format(test_id))
            print("[tabledata]\n{}".format(tabledata))
            print("[expected]\n{}".format(expected))
            print("")
            assert tabledata == expected
            load = True

        assert load
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal(
            self, tmpdir,
            test_id, tabledata, filename, header_list, expected):
        file_path = Path(str(tmpdir.join(filename)))
        file_path.parent.makedirs_p()

        con = SimpleSQLite(file_path, "w")

        con.create_table_from_tabledata(tabledata)

        loader = ptr.SqliteFileLoader(file_path)
        loader.header_list = header_list

        for tabledata in loader.load():
            print("test-id={}".format(test_id))
            print(ptw.dump_tabledata(tabledata))

            assert tabledata in expected
Project: Neural-Photo-Editor    Author: ajbrock    | project source | file source
def save_weights(fname, params, metadata=None):
    """ assumes all params have unique names.
    """
    # Includes batchnorm params now
    names = [par.name for par in params]
    if len(names) != len(set(names)):
        raise ValueError('need unique param names')
    param_dict = { param.name : param.get_value(borrow=False)
            for param in params }
    if metadata is not None:
        param_dict['metadata'] = pickle.dumps(metadata)
    logging.info('saving {} parameters to {}'.format(len(params), fname))
    # try to avoid half-written files
    fname = Path(fname)
    if fname.exists():
        tmp_fname = Path(fname.stripext() + '.tmp.npz') # TODO yes, this is a hack
        np.savez_compressed(str(tmp_fname), **param_dict)
        tmp_fname.rename(fname)
    else:
        np.savez_compressed(str(fname), **param_dict)
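
The temporary-file-then-rename step above exists to avoid half-written .npz archives. A hedged sketch of what the matching load side could look like (load_weights is not part of the project; it assumes Theano-style shared variables that expose set_value and the same unique-name convention as save_weights):

def load_weights(fname, params):
    # Hypothetical counterpart to save_weights(): restore each parameter by name.
    fname = Path(fname)
    with np.load(str(fname)) as archive:
        for param in params:
            param.set_value(archive[param.name])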
Project: migrate-itunes-to-rhythmbox    Author: phauer    | project source | file source
def write(playlists: List[Playlist], target_path: Path, target_library_root: str, source_library_root: str, exclude_playlist_folders: bool = True) -> None:
    persistent_id_to_playlist_dict = create_persistent_id_to_playlist_dict(playlists)
    filtered_playlist = filter_playlists_if_necessary(playlists, exclude_playlist_folders)
    root = etree.Element("rhythmdb-playlists")
    for playlist in filtered_playlist:
        name = create_playlist_name(playlist, persistent_id_to_playlist_dict)
        attributes = {'name': name, 'show-browser': 'true', 'browser-position': "231",
                      'search-type': "search-match", 'type': "static"}
        playlist_element = etree.SubElement(root, "playlist", attributes)
        for song in playlist.tracks:
            if song.location_escaped is not None:
                transformed_location = transform_to_rhythmbox_path(song.location_escaped, target_library_root, source_library_root)
                location_element = etree.SubElement(playlist_element, "location")
                location_element.text = transformed_location
            else:
                print("   Can't convert the track [{} - {}] in playlist '{}' because there is no file location defined. It's probably a remote file."
                      .format(song.artist, song.name, playlist.name))
    common.write_to_file(root, target_path, add_standalone_to_xml_declaration=False)
Project: tsrc    Author: SuperTanker    | project source | file source
def find_workspace_path():
    """ Look for a workspace root somewhere in the upper directories
    hierarchy

    """
    head = os.getcwd()
    tail = True
    while tail:
        tsrc_path = os.path.join(head, ".tsrc")
        if os.path.isdir(tsrc_path):
            return path.Path(head)
        tbuild_yml_path = os.path.join(head, "tbuild.yml")
        if os.path.exists(tbuild_yml_path):
            return path.Path(head)

        else:
            head, tail = os.path.split(head)
    raise tsrc.Error("Could not find current workspace")
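
os.path.split() returns an empty tail once the filesystem root is reached, which is what ends the loop and raises the error. A hedged usage sketch (manifest_dir is illustrative, not a real tsrc attribute):

workspace = find_workspace_path()    # path.Path of the directory that contains ".tsrc"
manifest_dir = workspace / ".tsrc"   # subpaths compose with the "/" operator
print("workspace root:", workspace.abspath())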
Project: bearsh    Author: coala    | project source | file source
def run_bear(bearcls, instance, input, **options):
    """
    Analyze `input` with :class:`Unleashed` Bear `instance`.

    :param bearcls:
       The original coala Bear class.
    :param input:
       Either a file ``path.Path`` instance or a ``str`` of input data.
    """
    if isinstance(input, Path):
        filename = input
        data = input.lines()
    else:
        filename = ':bearsh-input:'
        data = [line + '\n' for line in str(input).split('\n')]
    return bearcls.run(instance, filename, data, **options)
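
Since the dispatch is a plain isinstance(input, Path) check, callers can hand the bear either a file path or raw source text. A hedged usage sketch with placeholder names (SomeBear and bear_instance are illustrative, not real coala identifiers):

results_from_file = run_bear(SomeBear, bear_instance, Path("example.py"))
results_from_text = run_bear(SomeBear, bear_instance, "import os\nprint(os.name)")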
Project: partycrasher    Author: naturalness    | project source | file source
def from_json(cls, json_filename, db_filename='crashes.sqlite'):
        """
        Parses JSON creating a database.
        """

        json_filename = Path(json_filename)
        db_filename = Path(db_filename)

        if not db_filename.exists():
            pass
        elif db_filename.mtime > json_filename.mtime:
            return Corpus(db_filename)

        # Autovivify the corpus
        corpus = Corpus(db_filename)

        # Parse the JSON.
        data = load_oracle_data(json_filename, should_parse=False)
        crashes, _oracle_all, crash2bucket, _total_ids, _total_buckets = data

        for report_id, bucket_id in crash2bucket.items():
            if report_id not in crashes:
                continue
            corpus.insert_crash(report_id, crashes[report_id], bucket_id)
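
The mtime comparison acts as a cache check: if the SQLite file already exists and is newer than the JSON, it is reused as-is; otherwise the corpus is rebuilt from the parsed JSON. A hedged usage sketch (both file names are placeholders):

corpus = Corpus.from_json("oracle.json", db_filename="crashes.sqlite")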
Project: click-configfile    Author: click-contrib    | project source | file source
def save(ctx, dest="docs.html", format="html"):
    """Save/update docs under destination directory."""
    print("STEP: Generate docs in HTML format")
    build(ctx, builder=format)

    print("STEP: Save docs under %s/" % dest)
    source_dir = Path(ctx.config.sphinx.destdir)/format
    Path(dest).rmtree_p()
    source_dir.copytree(dest)

    # -- POST-PROCESSING: Polish up.
    for part in [ ".buildinfo", ".doctrees" ]:
        partpath = Path(dest)/part
        if partpath.isdir():
            partpath.rmtree_p()
        elif partpath.exists():
            partpath.remove_p()

# -----------------------------------------------------------------------------
# TASK CONFIGURATION:
# -----------------------------------------------------------------------------
Project: click-configfile    Author: jenisys    | project source | file source
def save(ctx, dest="docs.html", format="html"):
    """Save/update docs under destination directory."""
    print("STEP: Generate docs in HTML format")
    build(ctx, builder=format)

    print("STEP: Save docs under %s/" % dest)
    source_dir = Path(ctx.config.sphinx.destdir)/format
    Path(dest).rmtree_p()
    source_dir.copytree(dest)

    # -- POST-PROCESSING: Polish up.
    for part in [ ".buildinfo", ".doctrees" ]:
        partpath = Path(dest)/part
        if partpath.isdir():
            partpath.rmtree_p()
        elif partpath.exists():
            partpath.remove_p()

# -----------------------------------------------------------------------------
# TASK CONFIGURATION:
# -----------------------------------------------------------------------------
Project: cmake-python-distributions    Author: scikit-build    | project source | file source
def test_command_line(virtualenv, tmpdir):
    wheels = Path(DIST_DIR).files(pattern="*.whl")
    assert len(wheels) == 1

    virtualenv.run("pip install %s" % wheels[0])

    expected_version = "3.10.1"

    for executable_name in ["cmake", "cpack", "ctest"]:
        output = virtualenv.run(
            "%s --version" % executable_name, capture=True).splitlines()[0]
        assert output == "%s version %s" % (executable_name, expected_version)

    test_script = tmpdir.join("test_cmake.cmake")
    test_script.write(textwrap.dedent(r"""
    message("${CMAKE_COMMAND}")
    """))

    output = virtualenv.run("cmake -P %s" % str(test_script), capture=True)
    expected = os.path.realpath(virtualenv.virtualenv).replace(os.sep, "/")
    assert output[:len(expected)].lower() == expected.lower()
Project: sqlitebiter    Author: thombashi    | project source | file source
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Usage")

    usage_root = Path("pages").joinpath("usage")

    maker.inc_indent_level()
    maker.write_chapter("Create SQLite database from files")

    maker.write_line_list([
        ".. image:: docs/gif/usage_example.gif",
    ])

    maker.write_chapter("Create SQLite database from URL")
    maker.write_file(usage_root.joinpath("url", "usage.txt"))

    maker.inc_indent_level()
    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://{:s}.rtfd.io/en/latest/pages/{:s}/index.html".format(
            PROJECT_NAME.lower(), maker.examples_dir_name),
    ])
Project: pathvalidate    Author: thombashi    | project source | file source
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Examples")

    example_root = Path("pages").joinpath("examples")

    maker.inc_indent_level()
    maker.write_chapter("Validate a filename")
    maker.write_file(example_root.joinpath("validate_filename_code.txt"))

    maker.write_chapter("Sanitize a filename")
    maker.write_file(example_root.joinpath("sanitize_filename_code.txt"))

    maker.write_chapter("Sanitize a variable name")
    maker.write_file(example_root.joinpath("sanitize_var_name_code.txt"))

    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://pathvalidate.rtfd.io/en/latest/pages/examples/index.html",
    ])
Project: tcconfig    Author: thombashi    | project source | file source
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Usage")

    usage_root = Path("pages").joinpath("usage")

    maker.inc_indent_level()
    maker.write_chapter("Set traffic control (``tcset`` command)")
    maker.write_file(usage_root.joinpath("tcset", "description.txt"))
    maker.write_file(usage_root.joinpath("tcset", "basic_usage.rst"))

    maker.write_file(usage_root.joinpath("tcdel", "header.rst"))
    maker.write_file(usage_root.joinpath("tcdel", "usage.rst"))

    maker.write_file(usage_root.joinpath("tcshow", "header.rst"))
    maker.write_file(usage_root.joinpath("tcshow", "usage.rst"))

    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://{:s}.rtfd.io/en/latest/pages/usage/index.html".format(
            PROJECT_NAME),
    ])
Project: idasec    Author: RobinDavid    | project source | file source
def export_result(self, _):
        filename = QtWidgets.QFileDialog.getSaveFileName()[0]
        filepath = Path(filename)
        if not filepath.exists() and filepath != '':
                report = filepath if filepath.ext == ".html" else filepath.dirname() / filepath.namebase+".html"
                raw = filepath.dirname() / filepath.namebase+".csv"
                html_file = filepath.dirname() / filepath.namebase+".html"
                html_file.write_bytes(self.report.generate())
                report.write_text(self.report.generate())
                f = raw.open("w")
                for addr, infos in self.results.iteritems():
                    f.write(u"0x%x,%s,%d,%s,0x%x,0x%x\n" % (addr, to_status_name(infos.status), infos.k,
                                                            infos.dependency, infos.alive_branch, infos.dead_branch))
                f.close()
                self.log("[info]", "Export done in %s and %s" % (report.basename(), raw.basename()))
        else:
            self.log("[error]", "File already exists.. (do not save)")
Project: idasec    Author: RobinDavid    | project source | file source
def save_config_clicked(self, infile=True):
        raw_config = self.configuration_textarea.toPlainText()
        if raw_config == "":
            print "Press Generate button first"
        else:
            try:
                json_data = json.loads(raw_config)
                self.core.configuration.Clear()
                json2pb(self.core.configuration, json_data)
                if infile:
                    json_data = pb2json(self.core.configuration)
                    filename = QtWidgets.QFileDialog.getSaveFileName()[0]
                    filepath = Path(filename)
                    if filepath != '':
                        bytes = json.dumps(json_data, indent=4)
                        filepath.write_bytes(bytes)
                    else:
                        print "Invalid file given %s" % str(filepath)
            except KeyError as e:
                print "invalid key:"+e.message
Project: idasec    Author: RobinDavid    | project source | file source
def dump_trace(self):
        filename = QtWidgets.QFileDialog.getSaveFileName()[0]
        filepath = Path(filename)
        if not filepath.exists() and filepath != '':
            try:
                index = self.traces_tab.currentIndex()
                trace = self.core.traces[self.id_map[index]]
                f = filepath.open("w")
                for line in trace.to_string_generator():
                    f.write(line+"\n")
                f.close()
                print "Writing done"
            except KeyError:
                print "Trace not found"
        else:
            print "File already exists.. (do not dump)"
Project: triple-gan    Author: zhenxuan00    | project source | file source
def save_weights(fname, params, history=None):
    param_dict = convert2dict(params)

    logging.info('saving {} parameters to {}'.format(len(params), fname))
    fname = Path(fname)

    filename, ext = osp.splitext(fname)
    history_file = osp.join(osp.dirname(fname), 'history.npy')
    np.save(history_file, history)
    logging.info("Save history to {}".format(history_file))
    if ext == '.npy':
        np.save(filename + '.npy', param_dict)
    else:
        f = gzip.open(fname, 'wb')
        pickle.dump(param_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
        f.close()
Project: neb    Author: cstein    | project source | file source
def __init__(self, initial, final, nsteps=10):
        path.Path.__init__(self)

        assert isinstance(nsteps, int)

        self._molecules = [initial]

        ci = initial.getCoordinates()
        cf = final.getCoordinates()
        delta = (cf - ci) / (nsteps - 1)

        # only generate the inner range
        for k in range(1, nsteps-1):
            m2 = Molecule.fromMolecule(initial)
            m2.setCoordinates(ci + k*delta)
            self._molecules.append(m2)

        self._molecules.append(final)
        assert self.getNumBeads() == nsteps
Project: MorphForest    Author: j-luo93    | project source | file source
def get_seg_path(self, word):
        path = Path(word)
        while not path.is_ended():
            child = path.get_fringe_word()
            parts = child.split("'")
            if len(parts) == 2 and len(parts[0]) > 0 and self.lang == 'eng':
                path.expand(child, parts[0], 'APOSTR')
            else:
                parts = child.split('-')
                if len(parts) > 1:
                    p1, p2 = parts[0], child[len(parts[0]) + 1:]
                    path.expand(child, (p1, p2), 'HYPHEN')
                else:
                    parent, type_ = self.predict(child)
                    path.expand(child, parent, type_)
        return path
Project: MorphForest    Author: j-luo93    | project source | file source
def get_seg_path(self, w):
        path = Path(w)
        while not path.is_ended():
            child = path.get_fringe_word()
            parts = child.split("'")
            if len(parts) == 2 and len(parts[0]) > 0 and self.base.lang == 'eng':
                path.expand(child, parts[0], 'APOSTR')
            else:
                parts = child.split('-')
                if len(parts) > 1:
                    p1, p2 = parts[0], child[len(parts[0]) + 1:]
                    path.expand(child, (p1, p2), 'HYPHEN')
                else:
                    parent, type_ = self.predict(child)
                    path.expand(child, parent, type_)
        return path
Project: typepy    Author: thombashi    | project source | file source
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Usage")

    intro_root = Path(os.path.join("pages", "introduction"))

    maker.write_file(intro_root.joinpath("usage.txt"))
Project: skymod    Author: DelusionalLogic    | project source | file source
def cli(config):
    global down_cache, src_cache

    if config:
        read_config(Path(config))
    else:
        read_config()

    down_cache = DirMap(cfg.cache.dir)
    src_cache = DirMap(cfg.source.dir)
Project: skymod    Author: DelusionalLogic    | project source | file source
def __init__(self, string):
        self.uri, filename = string.split("::")
        self.filename = Path(filename)
Project: skymod    Author: DelusionalLogic    | project source | file source
def install(context, from_str, to_str=""):
    to = Path(to_str)
    if not is_subdir(context.pkgins / to, context.pkgins):
        raise Exception("Package tried to copy to directory outside its own")

    from_ = Path(from_str).normpath()
    if from_.isabs():
        raise Exception("From glob is not allowed to be absolute")

    from_ = context.source_lookup.translate(from_)

    for d in Path().glob(from_):
        from_ = d
        if not is_subdir(from_, context.pkgsrc):
            raise Exception("Package tried to copy from directory outside its own")

        name = from_.name

        if from_.isdir():
            rel_from = from_.relpath(context.pkgsrc)
            context.add_package_file( (rel_from, to / name) )
            for e in from_.walk():
                rel_to = e.relpath(from_)
                rel_from = e.relpath(context.pkgsrc)
                context.add_package_file( (rel_from, to / name / rel_to) )
        else:
            rel_from = from_.relpath(context.pkgsrc)
            context.add_package_file( (rel_from, to / name) )
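
is_subdir() is referenced but not shown in this snippet. A hedged sketch of what such a helper could look like (skymod's actual implementation may differ): normalize both paths to absolute form and require the child to equal the parent or start with the parent plus a separator.

import os

def is_subdir(child, parent):
    # Hypothetical helper: True if child lies inside (or equals) parent.
    child = os.path.abspath(child)
    parent = os.path.abspath(parent)
    return child == parent or child.startswith(parent + os.sep)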
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal_csv(self, tmpdir, file_path, format_name):
        filename = pv.replace_symbol(file_path, "")
        p_file_path = Path(
            six.text_type(tmpdir.join(filename + Path(file_path).ext)))
        p_file_path.parent.makedirs_p()

        with open(p_file_path, "w") as f:
            f.write('''"attr_a","attr_b","attr_c"
    1,4,"a"
    2,2.1,"bb"
    3,120.9,"ccc"''')

        expected_list = [
            TableData(
                filename,
                ["attr_a", "attr_b", "attr_c"],
                [
                    [1, 4,      "a"],
                    [2, "2.1",    "bb"],
                    [3, "120.9",  "ccc"],
                ])
        ]

        loader = ptr.TableFileLoader(p_file_path, format_name=format_name)

        assert loader.format_name == "csv"

        for tabledata, expected in zip(loader.load(), expected_list):
            print(ptw.dump_tabledata(expected))
            print(ptw.dump_tabledata(tabledata))

            assert tabledata == expected
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal_json(self, tmpdir, file_path, format_name):
        p_file_path = Path(str(tmpdir.join(file_path)))
        p_file_path.parent.makedirs_p()

        with open(p_file_path, "w") as f:
            f.write('''[
        {"attr_a": 1},
        {"attr_b": 2.1, "attr_c": "bb"}
    ]''')

        expected_list = [
            TableData(
                "validdata",
                ["attr_a", "attr_b", "attr_c"],
                [
                    {'attr_a': 1},
                    {'attr_b': 2.1, 'attr_c': 'bb'},
                ]),
        ]

        loader = ptr.TableFileLoader(p_file_path, format_name=format_name)

        assert loader.format_name == "json"

        for tabledata, expected in zip(loader.load(), expected_list):
            assert tabledata == expected
Project: pytablereader    Author: thombashi    | project source | file source
def test_normal_excel(self, tmpdir):
        file_path = '/tmp/valid/test/data/validdata.xlsx'
        p_file_path = Path(str(tmpdir.join(file_path)))
        p_file_path.parent.makedirs_p()

        tabledata_list = [
            TableData(
                table_name='testsheet1',
                header_list=['a1', 'b1', 'c1'],
                record_list=[
                    ['aa1', 'ab1', 'ac1'],
                    [1.0, 1.1, 'a'],
                    [2.0, 2.2, 'bb'],
                    [3.0, 3.3, 'cc"dd"'],
                ]),
            TableData(
                table_name='testsheet3',
                header_list=['a3', 'b3', 'c3'],
                record_list=[
                    ['aa3', 'ab3', 'ac3'],
                    [4.0, 1.1, 'a'],
                    [5.0, '', 'bb'],
                    [6.0, 3.3, ''],
                ]),
        ]

        writer = ptw.ExcelXlsxTableWriter()
        writer.open(p_file_path)
        for tabledata in tabledata_list:
            writer.from_tabledata(tabledata)
            writer.write_table()
        writer.close()

        loader = ptr.TableFileLoader(p_file_path)

        assert loader.format_name == "excel"

        for tabledata in loader.load():
            print(ptw.dump_tabledata(tabledata))

            assert tabledata in tabledata_list
Project: pytablereader    Author: thombashi    | project source | file source
def _get_filename_tablename_mapping(self):
        filename = ""
        if all([
                self.source_type == SourceType.FILE,
                typepy.is_not_null_string(self.source),
        ]):
            filename = path.Path(self.source).namebase

        return (tnt.FILENAME, filename)
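
namebase is path.py's name-without-extension property, so the mapping falls back to the source file's base name when loading from a file. A quick illustration (the path here is a placeholder):

path.Path("/data/reports/sales.csv").namebase   # -> "sales"
path.Path("/data/reports/sales.csv").ext        # -> ".csv"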
Project: Neural-Photo-Editor    Author: ajbrock    | project source | file source
def __init__(self, fname, reinitialize=False):
        self.fname = Path(fname)
        self.reinitialize = reinitialize
        if self.fname.exists():
            if self.reinitialize:
                logging.warn('{} exists, deleting'.format(self.fname))
                self.fname.remove()
Project: migrate-itunes-to-rhythmbox    Author: phauer    | project source | file source
def setUp(self):
        self.target_folder = Path(settings.TESTOUTPUT_FOLDER).joinpath("PlaylistTest")
        if not self.target_folder.exists():
            self.target_folder.makedirs()
Project: migrate-itunes-to-rhythmbox    Author: phauer    | project source | file source
def write_playlist_and_compare(self, itunes_library_input: Path, expected_playlist_xml: Path):
        target_path = self.target_folder.joinpath(expected_playlist_xml.name)
        itunes_library_path = str(itunes_library_input)
        playlists = itunes_library_reader.read_playlists(itunes_library_path)
        rhythmbox_playlists_writer.write(playlists=playlists,
                                         target_path=target_path,
                                         source_library_root="D:/Music/",
                                         target_library_root="/home/pha/Music/")
        with target_path.open(mode="r", encoding="UTF-8") as target_path_opened, \
                expected_playlist_xml.open("r") as expected_playlist_xml_opened:
            actual_playlist_xml = target_path_opened.read()
            expected_playlist_xml = expected_playlist_xml_opened.read()
        self.assertEqual(actual_playlist_xml, expected_playlist_xml, "{} and {} are different!".format(target_path_opened, expected_playlist_xml))
Project: migrate-itunes-to-rhythmbox    Author: phauer    | project source | file source
def setUp(self):
        self.target_folder = Path(settings.TESTOUTPUT_FOLDER).joinpath("CounterIntegrationTest")
        if not self.target_folder.exists():
            self.target_folder.makedirs()
Project: migrate-itunes-to-rhythmbox    Author: phauer    | project source | file source
def set_values_and_compare(self, rhythmdb_without_count_rating: Path,
                               itunes_library_path: Path,
                               expected_rhythmboxdb: Path,
                               output_file_name: str,
                               assert_something_was_changed: bool,
                               itunes_library_root: str="D:/Music/",
                               rhythmbox_library_root: str="/home/pha/Music/") -> IntegrationLog:
        target_rhythmdb = self.target_folder.joinpath(output_file_name)
        rhythmdb_without_count_rating.copy(target_rhythmdb)
        itunes_library = str(itunes_library_path)
        songs = itunes_library_reader.read_songs(itunes_library)
        log = rhythmbox_count_rating_integrator.set_values(itunes_songs=songs,
                                                         target_rhythmdb=target_rhythmdb,
                                                         itunes_library_root=itunes_library_root,
                                                         rhythmbox_library_root=rhythmbox_library_root)
        print("Expect something has changed: {}".format(assert_something_was_changed))
        if assert_something_was_changed:
            self.assertTrue(log.something_was_changed(), "No song entries were changed, but they should have been!")
        else:
            self.assertFalse(log.something_was_changed(), "Song entries were changed, but they shouldn't have been!")

        print("Compare content of {} (actual) with {} (expected)".format(target_rhythmdb, expected_rhythmboxdb))
        with expected_rhythmboxdb.open(mode="r", encoding="UTF-8") as expected_rhythmboxdb_opened, target_rhythmdb.open(
                "r") as target_rhythmdb_opened:
            actual_playlist_xml = target_rhythmdb_opened.read()
            expected_playlist_xml = expected_rhythmboxdb_opened.read()
        # Comparing XML is a pain: simple string comparison doesn't work due to different tag order and formatting (newline after each tag or not).
        # So let's sort the characters in both XML strings. This produces rubbish, but if the sorted rubbish is equal, the original XML is very likely to be equal too.
        actual_playlist_xml_normalized = sort_and_clean(actual_playlist_xml)
        expected_playlist_xml_normalized = sort_and_clean(expected_playlist_xml)
        self.assertEqual(actual_playlist_xml_normalized, expected_playlist_xml_normalized,
                         "Normalized content of {} and {} are different!".format(expected_rhythmboxdb, target_rhythmdb))
        return log
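
The comment above spells out the trade-off: sorting characters discards all structure, but two XML documents that differ only in tag order and whitespace normalize to the same string. sort_and_clean() itself is not shown; a hedged sketch of what it might look like:

def sort_and_clean(xml_text: str) -> str:
    # Hypothetical normalizer: drop whitespace, then sort the remaining characters
    # so only the multiset of characters is compared.
    stripped = "".join(xml_text.split())
    return "".join(sorted(stripped))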
Project: migrate-itunes-to-rhythmbox    Author: phauer    | project source | file source
def set_values(itunes_songs: Dict[int, Song], target_rhythmdb: Path, itunes_library_root: str, rhythmbox_library_root: str) -> IntegrationLog:
    itunes_statistics_dict = create_itunes_statistic_dict(itunes_songs, itunes_library_root)

    rhythmdb = lxml.etree.parse(target_rhythmdb)
    root = rhythmdb.getroot()
    log = integrate_statistics_into_rhythmdb(root, itunes_statistics_dict, rhythmbox_library_root)

    if log.something_was_changed():
        common.write_to_file(root, target_rhythmdb, add_standalone_to_xml_declaration=True)
    return log
Project: EMFT    Author: 132nd-etcher    | project source | file source
def __init__(self, _path):

        self.__path = str(Path(_path).abspath())
        self.__props = None
        self.__read_props()
Project: EMFT    Author: 132nd-etcher    | project source | file source
def abspath(self):
        return path.Path.abspath(self)
Project: EMFT    Author: 132nd-etcher    | project source | file source
def exists(self):
        return path.Path.exists(self)
Project: EMFT    Author: 132nd-etcher    | project source | file source
def get_size(self):
        return path.Path.getsize(self)
Project: EMFT    Author: 132nd-etcher    | project source | file source
def remove(self):
        return path.Path.remove(self)
Project: EMFT    Author: 132nd-etcher    | project source | file source
def write_text(self,
                   text,
                   encoding=None,
                   errors='strict',
                   linesep=os.linesep,
                   append=False):

        return path.Path.write_text(self, text, encoding, errors, linesep, append)
Project: EMFT    Author: 132nd-etcher    | project source | file source
def joinpath(self, first, *others):
        return Path(super(Path, self).joinpath(first, *others))
Project: EMFT    Author: 132nd-etcher    | project source | file source
def create_temp_file(
        *,
        suffix: str = None,
        prefix: str = None,
        create_in_dir: str = None) -> Path:
    os_handle, temp_file = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=create_in_dir or tempfile.gettempdir())
    os.close(os_handle)

    return Path(temp_file)
Project: EMFT    Author: 132nd-etcher    | project source | file source
def create_temp_dir(
        *,
        suffix: str = None,
        prefix: str = None,
        create_in_dir: str = None) -> Path:
    temp_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=create_in_dir or tempfile.gettempdir())

    return Path(temp_dir)
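
Both helpers return path.Path instances, so the usual Path methods apply and the caller is responsible for cleanup. A hedged usage sketch (the suffix and prefix values are placeholders):

tmp_file = create_temp_file(suffix=".json")
tmp_file.write_text("{}")
tmp_file.remove()

tmp_dir = create_temp_dir(prefix="example_")
(tmp_dir / "notes.txt").write_text("hello")
tmp_dir.rmtree_p()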