Python os.path module: dirname() example source code

We have extracted the following 50 code examples from open source Python projects to illustrate how to use os.path.dirname().

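A quick orientation before the project snippets: a minimal sketch, not taken from any of the projects below, showing what dirname() returns and the idiom most of the examples rely on. The paths and file names are made up for illustration.

from os.path import dirname, abspath, join

# dirname() is pure string manipulation; it never touches the filesystem.
assert dirname('/home/user/project/app.py') == '/home/user/project'
assert dirname('/home/user/project/') == '/home/user/project'
assert dirname('app.py') == ''   # no directory component at all

# The idiom that dominates the examples below: the directory containing the
# current module, with files addressed relative to it.
here = dirname(abspath(__file__))
settings_path = join(here, 'config', 'settings.json')  # hypothetical file name
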
Project: hdx-data-freshness    Author: OCHA-DAP
def script_dir(pyobject, follow_symlinks=True):
    """Get current script's directory

    Args:
        pyobject (Any): Any Python object in the script
        follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.

    Returns:
        str: Current script's directory
    """
    if getattr(sys, 'frozen', False):  # py2exe, PyInstaller, cx_Freeze
        path = abspath(sys.executable)
    else:
        path = inspect.getabsfile(pyobject)
    if follow_symlinks:
        path = realpath(path)
    return dirname(path)
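
A hypothetical call site for the script_dir() helper above. Passing the function object itself works because inspect.getabsfile() accepts functions; the file name is an assumption.

from os.path import join

# Any object defined in the module works, including the helper itself; in a
# frozen build (py2exe, PyInstaller, cx_Freeze) this resolves to the directory
# of the executable instead.
data_file = join(script_dir(script_dir), 'freshness.yml')  # hypothetical file name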
Project: pynini    Author: daffidilly
def load_data(self):
        # work in the parent of the pages directory, because we
        # want the filenames to begin "pages/...".
        chdir(dirname(self.setup.pages_dir))
        rel = relpath(self.setup.pages_dir)
        for root, dirs, files in walk(rel):
            for filename in files:
                start, ext = splitext(filename)
                if ext in self.setup.data_extensions:
                    #yield root, dirs, filename
                    loader = self.setup.data_loaders.get(ext)
                    path = join(root,filename)
                    if not loader:
                        raise SetupError("Identified data file '%s' by type '%s' but no loader found" % (filename, ext))

                    data_key = join(root, start)
                    loaded_dict = loader.loadf(path)
                    self.data[data_key] = loaded_dict

                    #self.setup.log.debug("data key [%s] ->" % (data_key, ), root, filename, ); pprint.pprint(loaded_dict, sys.stdout)

        #pprint.pprint(self.data, sys.stdout)
        #print("XXXXX data:", self.data)
Project: shellgen    Author: MarioVilas
def run(self):
        import sys
        sys.path.insert(0, dirname(__file__))
        print "testing shellgen.base"
        from shellgen.base import test
        test()
        print "testing shellgen.util"
        from shellgen.util import test
        test()
        print "testing shellgen.payload"
        from shellgen.payload import test
        test()
        print "testing shellgen.export"
        from shellgen.export import test
        test()
        from shellgen.util import get_available_platforms, \
                                  get_available_modules
        for module in get_available_modules("abstract", "any"):
            self.__test_module("abstract", "any", module)
        for arch, os in get_available_platforms():
            for module in get_available_modules(arch, os):
                self.__test_module(arch, os, module)
Project: pynini    Author: daffidilly
def get_all_pages(self):
        # work in the parent of the pages directory, because we
        # want the filenames to begin "pages/...".
        chdir(dirname(self.setup.pages_dir))
        rel = relpath(self.setup.pages_dir)

        for root, dirs, files in walk(rel):  # self.config.pages_dir):

            # examples:
            #
            #  root='pages'              root='pages/categories'
            #  dirs=['categories']       dirs=[]
            #  files=['index.html']      files=['list.html']

            # self.setup.log.debug("\nTEMPLATE ROOT: %s" % root)
            # self.setup.log.debug("TEMPLATE DIRS: %s" % dirs)
            # self.setup.log.debug("TEMPLATE FILENAMES: %s" % files)
            # #dir_context = global_context.new_child(data_tree[root])

            for filename in files:
                start, ext = splitext(filename)
                if ext in self.setup.template_extensions:
                    # if filename.endswith(".html"):  # TODO: should this filter be required at all?
                    yield Page(self.setup, filename, join(root, filename))
Project: pynini    Author: daffidilly
def write(self, out_path, context):
        out_dir = dirname(out_path)
        if not isdir(out_dir):
            mkdir_p(out_dir, self.setup.mkdir_perms, exist_ok=True)

        self.setup.log.info("format: %s (%s) -> %s" % (
            self.file_name, self.file_path, out_path))

        automatic_variables = dict(
            page=self.file_path,
            root=self.relative_root_path,
        )

        context = context \
            .new_child(automatic_variables)

        t = self.setup.jinja.get_template(self.file_path)

        #t.stream(context).dump(out_path)
        self.setup.template_writer.write(t, context, out_path)
Project: Telebackup    Author: LonamiWebs
def __init__(self, current_date, media_handler,
                 previous_date=None, following_date=None):
        """Initializes a new HTMLTLWriter for a current day which outputs to
           out_file_func(current_date).

           A media handler must be given so that the generated files know
           where to look for images, profile pictures and other media.

           Two optional previous_date/following_date parameters can be given;
           these should correspond to the previous and following days."""
        self.current_date = current_date
        self.formatter = HTMLFormatter(media_handler)

        # Open the current output file and store its handle
        output_file = media_handler.get_html_path(current_date)
        makedirs(dirname(output_file), exist_ok=True)
        self.handle = open(output_file, 'w', encoding='utf-8')

        # Begin the header before writing any Telegram message
        self.start_header(current_date=current_date,
                          previous_date=previous_date,
                          following_date=following_date)
Project: whatstyle    Author: mikr
def modified_results(tool):
    workdir = dirname(dirname(abspath(__file__)))
    exeresult = run_executable('git', ['-C', workdir, 'ls-files', '-z', '-m',
                                       'tests/examples/*/result_*'])
    files = exeresult.stdout.split(b'\x00')
    ignoreexts = bytestr('.cfg .cfg_diff .conf .pro .pro_diff .txt').split()
    result_filenames = []
    for f in files:
        if not f:
            continue
        filename = join(bytestr(workdir), f)
        _, ext = os.path.splitext(filename)
        if not ext or ext in ignoreexts:
            continue
        if not os.path.exists(filename):
            continue
        result_filenames.append(filename)
    diff_for_files(tool, result_filenames)
Project: foremast    Author: gogoair
def load_dynamic_config(configurations, config_dir=getcwd()):
    """Load and parse dynamic config"""
    # Create full path of config
    config_file = '{path}/config.py'.format(path=config_dir)

    # Insert config path so we can import it
    sys.path.insert(0, path.dirname(path.abspath(config_file)))
    try:
        config_module = __import__('config')

        for key, value in config_module.CONFIG.items():
            LOG.debug('Importing %s with key %s', key, value)
            # Update configparser object
            configurations.update({key: value})
    except ImportError:
        # Provide a default if config not found
        configurations = {}
Project: cxflow-tensorflow    Author: Cognexa
def test_keep_checkpoints(self):
        """
        Test if the checkpoints are kept.

        This is regression test for issue #71 (TF ``Saver`` is keeping only the last 5 checkpoints).
        """
        dummy_model = SimpleModel(dataset=None, log_dir=self.tmpdir, inputs=[], outputs=['output'])

        checkpoints = []
        for i in range(20):
            checkpoints.append(dummy_model.save(str(i)))

        for checkpoint in checkpoints:
            self.assertTrue(path.exists(checkpoint+'.index'))
            self.assertTrue(path.exists(checkpoint+'.meta'))
            data_prefix = path.basename(checkpoint)+'.data'
            data_files = [file for file in os.listdir(path.dirname(checkpoint)) if file.startswith(data_prefix)]
            self.assertGreater(len(data_files), 0)
Project: pineapple    Author: peter765
def load_plugins(self):
        """
        Load all plugin files in the folder (specified by self.directory) as modules
        and into a container dictionary list.
        :return:
        """
        # Clear containers
        self.plugins.clear()
        self.commands.clear()
        self.join.clear()
        self.leave.clear()
        self.typing.clear()
        self.delete.clear()
        self.loop.clear()

        # Find all python files in the plugin directory
        modules = glob.glob(dirname(__file__) + "/" + self.dir + "/**/*.py", recursive=True)

        # Iterate over each file, import them as a Python module and add them to the plugin list
        for f in modules:
            spec = importlib.util.spec_from_file_location(basename(f)[:-3], f)
            plugin = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(plugin)
            self.plugins[basename(f)] = plugin.Plugin(self)
            print("Loaded plugin: " + basename(f))
Project: ssbio    Author: SBRG
def metadata_path(self, m_path):
        """Provide pointers to the paths of the metadata file

        Args:
            m_path: Path to metadata file

        """
        if not m_path:
            self.metadata_dir = None
            self.metadata_file = None

        else:
            if not op.exists(m_path):
                raise OSError('{}: file does not exist!'.format(m_path))

            if not op.dirname(m_path):
                self.metadata_dir = '.'
            else:
                self.metadata_dir = op.dirname(m_path)
            self.metadata_file = op.basename(m_path)

            # TODO: update using Biopython's built in SeqRecord parser
            # Just updating IDs and stuff
            self.update(parse_kegg_gene_metadata(self.metadata_path), overwrite=True)
Project: ssbio    Author: SBRG
def split_folder_and_path(filepath):
    """Split a file path into its folder, filename, and extension

    Args:
        filepath (str): Path to a file

    Returns:
        tuple: of (folder, filename (without extension), extension)

    """
    dirname = op.dirname(filepath)
    filename = op.basename(filepath)
    splitext = op.splitext(filename)
    filename_without_extension = splitext[0]
    extension = splitext[1]

    return dirname, filename_without_extension, extension
Project: ssbio    Author: SBRG
def feature_path(self, gff_path):
        """Load a GFF file with information on a single sequence and store features in the ``features`` attribute

        Args:
            gff_path: Path to GFF file.

        """
        if not gff_path:
            self.feature_dir = None
            self.feature_file = None

        else:
            if not op.exists(gff_path):
                raise OSError('{}: file does not exist!'.format(gff_path))

            if not op.dirname(gff_path):
                self.feature_dir = '.'
            else:
                self.feature_dir = op.dirname(gff_path)
            self.feature_file = op.basename(gff_path)
Project: ssbio    Author: SBRG
def structure_path(self, path):
        """Provide pointers to the paths of the structure file

        Args:
            path: Path to structure file

        """
        if not path:
            self.structure_dir = None
            self.structure_file = None

        else:
            if not op.exists(path):
                raise OSError('{}: file does not exist!'.format(path))

            if not op.dirname(path):
                self.structure_dir = '.'
            else:
                self.structure_dir = op.dirname(path)
            self.structure_file = op.basename(path)
Project: oio-sds-utils    Author: open-io
def sharded_container(basedir, acct, cname, path, as_prefix=""):
    for prefix in prefixes():
        new_acct, new_cname = compute_new_cname(acct, cname, prefix)
        new_cid = cid_from_name(new_acct, new_cname)
        new_path = basedir + '/' + new_cid[0:3] + '/' + new_cid + '.1.meta2'
        logging.debug("%s %s %s %s", new_path, new_acct, new_cname, new_cid)

        try:
            makedirs(dirname(new_path))
        except OSError:
            pass

        try:
            from subprocess import check_call
            check_call(["/bin/cp", "-p", path, new_path])
            with connect(new_path) as db:
                prune_database(db, new_cname, new_cid,
                               ''.join([as_prefix, prefix]))
            print new_acct, new_cname, new_cid
        except Exception:
            from traceback import print_exc
            print_exc(file=stderr)
Project: geekcloud    Author: Mr-Linus
def renderTemplate(script_path, time_file_path, dimensions=(24, 80), templatename=DEFAULT_TEMPLATE):
    with copen(script_path, encoding='utf-8', errors='replace', newline='\r\n') as scriptf:
    # with open(script_path) as scriptf:
        with open(time_file_path) as timef:
            timing = getTiming(timef)
            json = scriptToJSON(scriptf, timing)

    fsl = FileSystemLoader(dirname(templatename), 'utf-8')
    e = Environment()
    e.loader = fsl

    templatename = basename(templatename)
    rendered = e.get_template(templatename).render(json=json,
                                                   dimensions=dimensions)

    return rendered
Project: bambi    Author: bambinos
def test_prior_factory_init_from_config():
    config_file = join(dirname(__file__), 'data', 'sample_priors.json')
    pf = PriorFactory(config_file)
    for d in ['dists', 'terms', 'families']:
        assert hasattr(pf, d)
        assert isinstance(getattr(pf, d), dict)
    config_dict = json.load(open(config_file, 'r'))
    pf = PriorFactory(config_dict)
    for d in ['dists', 'terms', 'families']:
        assert hasattr(pf, d)
        assert isinstance(getattr(pf, d), dict)
    assert 'feta' in pf.dists
    assert 'hard' in pf.families
    assert 'yellow' in pf.terms
    pf = PriorFactory(dists=config_dict['dists'])
    assert 'feta' in pf.dists
    pf = PriorFactory(terms=config_dict['terms'])
    assert 'yellow' in pf.terms
    pf = PriorFactory(families=config_dict['families'])
    assert 'hard' in pf.families
Project: bambi    Author: bambinos
def test_prior_retrieval():
    config_file = join(dirname(__file__), 'data', 'sample_priors.json')
    pf = PriorFactory(config_file)
    prior = pf.get(dist='asiago')
    assert prior.name == 'Asiago'
    assert isinstance(prior, Prior)
    assert prior.args['hardness'] == 10
    with pytest.raises(KeyError):
        assert prior.args['holes'] == 4
    family = pf.get(family='hard')
    assert isinstance(family, Family)
    assert family.link == 'grate'
    backup = family.prior.args['backup']
    assert isinstance(backup, Prior)
    assert backup.args['flavor'] == 10000
    prior = pf.get(term='yellow')
    assert prior.name == 'Swiss'

    # Test exception raising
    with pytest.raises(ValueError):
        pf.get(dist='apple')
    with pytest.raises(ValueError):
        pf.get(term='banana')
    with pytest.raises(ValueError):
        pf.get(family='cantaloupe')
Project: bambi    Author: bambinos
def __init__(self, defaults=None, dists=None, terms=None, families=None):

        if defaults is None:
            defaults = join(dirname(__file__), 'config', 'priors.json')

        if isinstance(defaults, string_types):
            defaults = json.load(open(defaults, 'r'))

        # Just in case the user plans to use the same defaults elsewhere
        defaults = deepcopy(defaults)

        if isinstance(dists, dict):
            defaults['dists'].update(dists)

        if isinstance(terms, dict):
            defaults['terms'].update(terms)

        if isinstance(families, dict):
            defaults['families'].update(families)

        self.dists = defaults['dists']
        self.terms = defaults['terms']
        self.families = defaults['families']
Project: darkc0de-old-stuff    Author: tuwid
def main():
    fusil_dir = dirname(__file__)
    sys_path.append(fusil_dir)

    # Test documentation in doc/*.rst files
    testDoc('doc/c_tools.rst')
    testDoc('doc/file_watch.rst')
    testDoc('doc/mangle.rst')

    # Unit tests as reST
    testDoc('tests/file_watch_read.rst')
    testDoc('tests/cmd_help_parser.rst')

    # Test documentation of some functions/classes
    testModule("fusil.tools")
    testModule("fusil.fuzzer.python")
Project: sublime-text-3-packages    Author: nickjj
def find_local_cmd_path(self, cmd):
        """
        Find a local binary in node_modules/.bin.

        Given package.json filepath and a local binary to find,
        look in node_modules/.bin for that binary.

        """

        cwd = path.dirname(self.manifest_path)

        binary = self.get_pkg_bin_cmd(cmd)

        if binary:
            return path.normpath(path.join(cwd, binary))

        return self.find_ancestor_cmd_path(cmd, cwd)
Project: underthesea    Author: magizbox
def __init__(self):
        self.model = pycrfsuite.Tagger()
        filepath = join(dirname(__file__), "model.bin")
        self.model.open(filepath)

        template = [
            "T[-2].lower", "T[-1].lower", "T[0].lower", "T[1].lower",
            "T[2].lower",
            "T[0].istitle", "T[-1].istitle", "T[1].istitle", "T[-2].istitle",
            "T[2].istitle",
            # word unigram and bigram
            "T[-2]", "T[-1]", "T[0]", "T[1]", "T[2]",
            "T[-2,-1]", "T[-1,0]", "T[0,1]", "T[1,2]",
            # pos unigram and bigram
            "T[-2][1]", "T[-1][1]", "T[0][1]", "T[1][1]", "T[2][1]",
            "T[-2,-1][1]", "T[-1,0][1]", "T[0,1][1]", "T[1,2][1]",
            # ner
            "T[-3][3]", "T[-2][3]", "T[-1][3]",
        ]
        self.transformer = TaggedTransformer(template)
Project: underthesea    Author: magizbox
def download_component(component_name):
    try:
        component = [component for component in components if
                     component["name"] == component_name][0]
        try:
            folder = dirname(dirname(__file__))
            file_name = join(folder, join(*component["destination"]))
            if isfile(file_name):
                print("Component '{}' already exists.".format(component["name"]))
            else:
                print("Start download component '{}'".format(component["name"]))
                print(file_name)
                download_file(component["url"], file_name)
                print("Finish download component '{}'".format(component["name"]))
        except Exception as e:
            print(e)
            print("Cannot download component '{}'".format(component["name"]))
    except:
        message = "Error: Component with name '{}' does not exist.".format(
            component_name)
        print(message)
Project: underthesea    Author: magizbox
def __init__(self):
        self.model = pycrfsuite.Tagger()
        filepath = join(dirname(__file__), "model.bin")
        self.model.open(filepath)

        template = [
            "T[-2].lower", "T[-1].lower", "T[0].lower", "T[1].lower",
            "T[2].lower",
            "T[0].istitle", "T[-1].istitle", "T[1].istitle",
            # word unigram and bigram
            "T[-2]", "T[-1]", "T[0]", "T[1]", "T[2]",
            "T[-2,-1]", "T[-1,0]", "T[0,1]", "T[1,2]",
            # pos unigram and bigram
            "T[-3][1]", "T[-2][1]", "T[-1][1]",
            "T[-3,-2][1]", "T[-2,-1][1]",
        ]
        self.transformer = TaggedTransformer(template)
Project: hostapd-mana    Author: adde88
def siblingExtensionSearch(self, *exts):
        """Attempt to return a path with my name, given multiple possible
        extensions.

        Each extension in exts will be tested and the first path which exists
        will be returned.  If no path exists, None will be returned.  If '' is
        in exts, then if the file referred to by this path exists, 'self' will
        be returned.

        The extension '*' has a magic meaning, which means "any path that
        begins with self.path+'.' is acceptable".
        """
        p = self.path
        for ext in exts:
            if not ext and self.exists():
                return self
            if ext == '*':
                basedot = basename(p)+'.'
                for fn in listdir(dirname(p)):
                    if fn.startswith(basedot):
                        return self.clonePath(joinpath(dirname(p), fn))
            p2 = p + ext
            if exists(p2):
                return self.clonePath(p2)
Project: hostapd-mana    Author: adde88
def modulesInPackage(self, packageName, package):
        docless = []
        directory = path.dirname(package.__file__)
        for modfile in glob.glob(path.join(directory, '*.py')):
            moduleName = inspect.getmodulename(modfile)
            if moduleName == '__init__':
                # These are tested by test_packages.
                continue
            elif moduleName in ('spelunk_gnome','gtkmanhole'):
                # argh special case pygtk evil argh.  How does epydoc deal
                # with this?
                continue
            try:
                module = reflect.namedModule('.'.join([packageName,
                                                       moduleName]))
            except Exception, e:
                # print moduleName, "misbehaved:", e
                pass
            else:
                if not inspect.getdoc(module):
                    docless.append(modfile)
        return docless
Project: enigma2    Author: OpenLD
def saveFile(filename, data, mode=0644):
    tmpFilename = None
    try:
        f = NamedTemporaryFile(prefix='.%s.' % path.basename(filename), dir=path.dirname(filename), delete=False)
        tmpFilename = f.name
        if isinstance(data, list):
            for x in data:
                f.write(x)
        else:
            f.write(data)
        f.flush()
        fsync(f.fileno())
        fchmod(f.fileno(), mode)
        f.close()
        rename(tmpFilename, filename)
    except Exception as e:
        print 'saveFile: failed to write to %s: %s' % (filename, e)
        if tmpFilename and path.exists(tmpFilename):
            unlink(tmpFilename)
        return False

    return True
Project: Main    Author: N-BodyPhysicsSimulator
def dirname_is_existing_dir(path: str) -> str:
    """
    >>> import tempfile

    >>> with tempfile.TemporaryDirectory() as dir:
    ...     dirname_is_existing_dir(dir) == dir
    True

    >>> dirname_is_existing_dir('/non/existing/dir')
    Traceback (most recent call last):
    argparse.ArgumentTypeError: Dirname of path is not an existing directory.
    """

    if isdir(dirname(abspath(path))):
        return path
    else:
        raise ArgumentTypeError("Dirname of path is not an existing directory.")
Project: puppeter    Author: coi-gov-pl
def __load_modules(module_name):
    import os
    from os.path import join, abspath, isdir, exists
    rootdir = dirname(dirname(__file__))

    search = join(abspath(rootdir), module_name.replace('.', os.sep))
    lst = os.listdir(search)
    modules = []
    for d in lst:
        subpath = join(search, d)
        if isdir(subpath) and exists(join(subpath, '__init__.py')):
            submodule_name = module_name + '.' + d
            __load_modules(submodule_name)
            modules.append(submodule_name)
    # load the modules
    for module_name_to_import in modules:
        __import__(module_name_to_import)
Project: embeddings    Author: vzhong
def download_file(url, local_filename):
        """
        Downloads a file from an url to a local file.

        Args:
            url (str): url to download from.
            local_filename (str): local file to download to.

        Returns:
            str: file name of the downloaded file.

        """
        r = requests.get(url, stream=True)
        if path.dirname(local_filename) and not path.isdir(path.dirname(local_filename)):
            makedirs(path.dirname(local_filename))
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        return local_filename
Project: embeddings    Author: vzhong
def initialize_db(fname):
        """

        Args:
            fname (str): location of the database.

        Returns:
            db (sqlite3.Connection): a SQLite3 database with an embeddings table.

        """
        if path.dirname(fname) and not path.isdir(path.dirname(fname)):
            makedirs(path.dirname(fname))
        db = sqlite3.connect(fname)
        c = db.cursor()
        c.execute('create table if not exists embeddings(word text primary key, emb blob)')
        db.commit()
        return db
Project: buttervolume    Author: anybox
def schedule():
    """Schedule or unschedule a job
    TODO add a lock
    """
    name = jsonloads(request.body.read())['Name']
    timer = jsonloads(request.body.read())['Timer']
    action = jsonloads(request.body.read())['Action']
    schedule = []
    if timer:  # 0 means unschedule!
        schedule.append((name, action, timer))
    if os.path.exists(SCHEDULE):
        with open(SCHEDULE) as f:
            for n, a, t in csv.reader(f):
                # skip the line we want to write
                if n == name and a == action:
                    continue
                schedule.append((n, a, t))
    os.makedirs(dirname(SCHEDULE), exist_ok=True)
    with open(SCHEDULE, 'w') as f:
        for line in schedule:
            csv.writer(f).writerow(line)
    return json.dumps({'Err': ''})
Project: website-fingerprinting    Author: AxelGoetz
def extract_all_features(save_dir, data_dir=DATA_DIR, extension=".cell"):
    from naive_bayes import extract_nb_features
    from random_forest import extract_rf_features
    from svc1 import extract_svc1_features
    from svc2 import extract_svc2_features
    import subprocess

    create_dir_if_not_exists(save_dir + '/knn_cells/')
    subprocess.run([
        'go', 'run', dirname(__file__) + '/kNN.go', '-folder', data_dir + '/',
        '-new_path', save_dir + '/knn_cells/', '-extension', extension]
    )

    # extract_features(extract_nb_features, save_dir + '/nb_cells', data_dir=data_dir, extension=extension, model_name="naive bayes")
    extract_features(extract_rf_features, save_dir + '/rf_cells', data_dir=data_dir, extension=extension, model_name="random forest")
    extract_features(extract_svc1_features, save_dir + '/svc1_cells', data_dir=data_dir, extension=extension, model_name="svc1")
    extract_features(extract_svc2_features, save_dir + '/svc2_cells', data_dir=data_dir, extension=extension, model_name="svc2")

    stdout.write("Finished extracting features\n")
Project: feagen    Author: ianlini
def draw_dag(nx_dag, path):
    if dirname(path) != '':
        mkdir_p(dirname(path))
    agraph = nx.nx_agraph.to_agraph(nx_dag)
    for edge in agraph.edges_iter():
        if edge.attr['nonskipped_keys'] is None:
            edge.attr['label'] = edge.attr['keys']
        else:
            edge.attr['label'] = ""
            if edge.attr['nonskipped_keys'] not in ["set()", "set([])"]:
                edge.attr['label'] += edge.attr['nonskipped_keys']
            if (edge.attr['skipped_keys'] not in ["set()", "set([])"]
                    and edge.attr['skipped_keys'] is not None):
                edge.attr['label'] += "(%s skipped)" % edge.attr['skipped_keys']
    for node in agraph.nodes_iter():
        if node.attr['skipped'] == "True":
            node.attr['label'] = node.attr['__name__'] + " (skipped)"
            node.attr['fontcolor'] = 'grey'
        else:
            node.attr['label'] = node.attr['__name__']
    agraph.layout('dot')
    agraph.draw(path)
Project: metatab    Author: Metatab
def declaration_path(name):
    """Return the path to an included declaration"""
    from os.path import dirname, join, exists
    import  metatabdecl
    from metatab.exc import IncludeError

    d = dirname(metatabdecl.__file__)

    path = join(d, name)

    if not exists(path):
        path = join(d, name + '.csv')

    if not exists(path):
        raise IncludeError("No local declaration file for name '{}' ".format(name))

    return path


# From http://stackoverflow.com/a/295466
Project: riko    Author: nerevu
def get_abspath(url):
    url = 'http://%s' % url if url and '://' not in url else url

    if url and url.startswith('file:///'):
        # already have an abspath
        pass
    elif url and url.startswith('file://'):
        parent = p.dirname(p.dirname(__file__))
        rel_path = url[7:]
        abspath = p.abspath(p.join(parent, rel_path))
        url = 'file://%s' % abspath

    return decode(url)


# https://trac.edgewall.org/ticket/2066#comment:1
# http://stackoverflow.com/a/22675049/408556
Project: argparseinator    Author: ellethee
def __init__(self, name, dest, description=None):
        self.name = name
        wholetitle = "{} :core:`{}.{}`".format(
            name.title(), basename(dirname(dest)), name)
        wholetitlemark = "=" * len(wholetitle)
        description = description or ''
        self.info = dict(
            prj_name=name,
            prj_title=name.title(),
            prj_titlemark="=" * len(name.title()),
            mod_path=name,
            title=name.title(),
            titlemark="=" * len(name.title()),
            wholetitle=wholetitle,
            wholetitlemark=wholetitlemark,
            description=description,
        )
        super(DevFormatter, self).__init__()
Project: DCRM    Author: 82Flex
def get_app_template_dir(app_name):
    """Get the template directory for an application

    We do not use django.db.models.get_app, because this will fail if an
    app does not have any models.

    Returns a full path, or None if the app was not found.
    """
    from django.conf import settings
    from importlib import import_module
    if app_name in _cache:
        return _cache[app_name]
    template_dir = None
    for app in settings.INSTALLED_APPS:
        if app.split('.')[-1] == app_name:
            # Do not hide import errors; these should never happen at this point
            # anyway
            mod = import_module(app)
            template_dir = join(abspath(dirname(mod.__file__)), 'templates')
            break
    _cache[app_name] = template_dir
    return template_dir
Project: python-markov-novel    Author: accraze
def test_main():
    """
    Basic functional test
    """
    assert markov_novel
    path = 'tmp'
    os.makedirs(path)
    os.chdir(path)
    # Get raw text as string.
    from os.path import dirname, abspath
    filename = os.path.join(
        dirname(dirname(abspath(__file__))), 'tests/futuristmanifest.txt')
    with open(filename) as f:
        text = f.read()
    # Build the model.
    text_model = markovify.Text(text)
    novel = markov_novel.Novel(text_model, chapter_count=1)
    novel.write(novel_title='my-novel', filetype='md')
    assert os.path.exists(os.path.join(os.getcwd(), 'my-novel.md'))
    os.chdir(os.pardir)
    shutil.rmtree('tmp', ignore_errors=True)
Project: dilation    Author: fyu
def __init__(self, dataset_name):
        self.work_dir = dirname(__file__)
        info_path = join(self.work_dir, 'datasets', dataset_name + '.json')
        if not exists(info_path):
            raise IOError("Do not have information for dataset {}"
                          .format(dataset_name))
        with open(info_path, 'r') as fp:
            info = json.load(fp)
        self.palette = np.array(info['palette'], dtype=np.uint8)
        self.mean_pixel = np.array(info['mean'], dtype=np.float32)
        self.dilation = info['dilation']
        self.zoom = info['zoom']
        self.name = dataset_name
        self.model_name = 'dilation{}_{}'.format(self.dilation, self.name)
        self.model_path = join(self.work_dir, 'models',
                               self.model_name + '_deploy.prototxt')
Project: PhonePerformanceMeasure    Author: KyleCe
def convert_file_into_directory(file_with_full_path):
    path = dirname(file_with_full_path)
    tags = os.path.split(file_with_full_path)
    file_name = tags[1]
    convert_and_store_in_directory(path, file_name)
Project: newsreap    Author: caronc
def touch(self, path, size=None, random=False, perm=None, time=None):
        """Simplify the dynamic creation of files or the updating of their
        modified time.  If a size is specified, then a file of that size
        will be created on the disk. If the file already exists, then the
        size= attribute is ignored (for safety reasons).

        if random is set to true, then the file created is actually
        created using tons of randomly generated content.  This is MUCH
        slower but necessary for certain tests.

        """

        path = abspath(path)
        if not isdir(dirname(path)):
            mkdir(dirname(path), 0700)

        if not exists(path):
            size = strsize_to_bytes(size)

            if not random:
                f = open(path, "wb")
                if isinstance(size, int) and size > 0:
                    f.seek(size-1)
                    f.write("\0")
                f.close()

            else: # fill our file with randomly generated content
                with open(path, 'wb') as f:
                    # Fill our file with garbage
                    f.write(urandom(size))

        # Update our path
        utime(path, time)

        if perm is not None:
            # Adjust permissions
            chmod(path, perm)

        # Return True
        return True
Project: newsreap    Author: caronc
def test_writes(self):
        """
        More overhead than a normal write(), but nonetheless using the
        write() in this class keeps things simple since the file is
        automatically opened if it was otherwise closed
        """

        # First we create a 1MB file
        tmp_file = join(self.tmp_dir, 'NNTPContent_Test.write', 'tmp.file')
        # File should not already exist
        assert(isfile(tmp_file) is False)

        # Now we want to create our NNTPContent() object surrounding this
        # file that does not exist.
        content = NNTPContent(filepath=tmp_file, work_dir=dirname(tmp_file))
        # It's worth noting that this file will 'still' not exist
        assert(isfile(tmp_file) is False)
        # we'll write data

        data = 'hello\r\n'
        content.write(data)

        # It's worth noting that this file will ''STILL'' not exist
        assert(isfile(tmp_file) is False)

        # Save content
        assert(content.save() is True)

        # Now the file 'will' exist
        assert(isfile(tmp_file) is True)

        # Open our file and verify it is the data we saved.
        with open(tmp_file) as f:
            data_read = f.read()
        assert(data == data_read)
Project: logscan    Author: magedu
def start(self):
        self.check_chain.start()
        self.observer.schedule(self, path.dirname(self.filename), recursive=False)
        self.observer.start()
        self.observer.join()
Project: Blender-power-sequencer    Author: GDquest
def execute(self, context):
        if not bpy.data.is_saved:
            self.report({'WARNING'}, "Save your file first")
            return {'CANCELLED'}

        script_file = os.path.realpath(__file__)
        addon_directory = os.path.dirname(script_file)

        # audio
        if bpy.context.scene.render.ffmpeg.audio_codec == 'NONE':
            bpy.context.scene.render.ffmpeg.audio_codec = 'AAC'
            bpy.context.scene.render.ffmpeg.audio_bitrate = 192

        # video
        if self.preset == 'youtube':
            bpy.ops.script.python_file_run(filepath=os.path.join(addon_directory, 'render_presets', 'youtube_1080.py'))
        elif self.preset == 'twitter':
            bpy.ops.script.python_file_run(filepath=os.path.join(addon_directory, 'render_presets', 'twitter_720p.py'))

        from os.path import splitext, dirname
        path = bpy.data.filepath

        exported_file_name = 'video'
        if self.name_pattern == 'blender':
            exported_file_name = splitext(bpy.path.basename(path))[0]
        elif self.name_pattern == 'folder':
            exported_file_name = dirname(path).rsplit(sep="\\", maxsplit=1)[-1]
        elif self.name_pattern == 'scene':
            exported_file_name = bpy.context.scene.name

        bpy.context.scene.render.filepath = "//" + exported_file_name + '.mp4'
        if self.auto_render:
            bpy.ops.render.render({'dict': "override"}, 'INVOKE_DEFAULT', animation=True)
        return {"FINISHED"}
Project: pytest-cython    Author: lgpage
def read(*names, **kwargs):
    return io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ).read()
Project: cookiecutter-django-app    Author: edx
def root(*args):
    """
    Get the absolute path of the given path relative to the project root.
    """
    return join(abspath(dirname(__file__)), *args)
Project: pynini    Author: daffidilly
def determine(argv=None):
        argv = argv or sys.argv  # use command-line flags if nothing else passed

        arg_parser = ArgumentParser(prog='pynini', description='Static site processor')
        arg_parser.add_argument('--verbosity', '-v', action='count', help='increase log verbosity', default=0)
        arg_parser.add_argument('--dist', metavar='dist_dir', help='specify dist directory (disable auto detect)', default=None)
        arg_parser.add_argument('--src', '-s', metavar='src_dir', help='specify src directory (disable auto detect)', default=None)
        arg_parser.add_argument('--pages', metavar='pages_dir', help='specify pages directory (disable auto detect)', default=None)
        arg_parser.add_argument('--layouts', metavar='layouts_dir', help='specify layouts directory (disable auto detect)', default=None)
        arg_parser.add_argument('--partials', metavar='partials_dir', help='specify partials directory (disable auto detect)', default=None)
        parsed_args = arg_parser.parse_args(argv[1:])
        # print(parsed_args)

        # each of the below can be None
        dist_dir = parsed_args.dist
        src_dir = parsed_args.src
        pages_dir = parsed_args.pages
        layouts_dir = parsed_args.layouts
        partials_dir = parsed_args.partials

        operation_dir = getcwd()
        if isdir(join(operation_dir, 'src')):
            src_dir = src_dir or join(operation_dir, 'src')
            dist_dir = dist_dir or join(operation_dir, 'dist')

        elif 'src' == basename(operation_dir):  # we're inside the src dir
            src_dir = src_dir or operation_dir
            dist_dir = dist_dir or join(dirname(operation_dir), 'dist')

        elif not src_dir or not dist_dir:
            raise SetupError('Could not determine src_dir, dist_dir')

        return Setup(operation_dir,
                     dist_dir,
                     src_dir,
                     pages_dir=pages_dir,
                     layouts_dir=layouts_dir,
                     partials_dir=partials_dir,
                     verbosity=parsed_args.verbosity,
                     template_loader=None,
                     template_writer=None)
Project: epubcheck    Author: titusz
def read(*names, **kwargs):
    return io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ).read()
Project: python-twelve-tone    Author: accraze
def read(*names, **kwargs):
    return io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ).read()