Python os.path module: exists() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use os.path.exists().
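Before the project examples, here is a minimal, self-contained sketch of the typical check-then-act pattern (the helper name, directory, and file names are hypothetical, chosen only for illustration): test whether a path exists before reading it, and create it when it is missing.

from os import makedirs
from os.path import exists, join

def read_or_init(data_dir, file_name, default=''):
    """Return the file's contents if it exists, otherwise create it with a default."""
    if not exists(data_dir):
        # exists() answers for both files and directories
        makedirs(data_dir)
    file_path = join(data_dir, file_name)
    if exists(file_path):
        with open(file_path, encoding='utf-8') as fh:
            return fh.read()
    with open(file_path, 'w', encoding='utf-8') as fh:
        fh.write(default)
    return default

Note that exists() only reports the state at the moment of the call; a file can appear or disappear between the check and the following open(), which is why some of the examples below still wrap the subsequent operation in try/except.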

Project: CodingDojo    Author: ComputerSocietyUNB    | project source | file source
def handle_template(self, template, subdir):
        """
        Determines where the app or project templates are.
        Use django.__path__[0] as the default because we don't
        know into which directory Django has been installed.
        """
        if template is None:
            return path.join(django.__path__[0], 'conf', subdir)
        else:
            if template.startswith('file://'):
                template = template[7:]
            expanded_template = path.expanduser(template)
            expanded_template = path.normpath(expanded_template)
            if path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = path.abspath(expanded_template)
            if path.exists(absolute_path):
                return self.extract(absolute_path)

        raise CommandError("couldn't handle %s template %s." %
                           (self.app_or_project, template))
Project: microbar    Author: Bengt    | project source | file source
def install_python(version, arch, home):
    print("Installing Python", version, "for", arch, "bit architecture to", home)
    if exists(home):
        return

    path = download_python(version, arch)
    print("Installing", path, "to", home)
    success = False
    for cmd in INSTALL_CMD[version]:
        cmd = [part.format(home=home, path=path) for part in cmd]
        print("Running:", " ".join(cmd))
        try:
            check_call(cmd)
        except CalledProcessError as exc:
            print("Failed command", cmd, "with:", exc)
            if exists("install.log"):
                with open("install.log") as fh:
                    print(fh.read())
        else:
            success = True
    if success:
        print("Installation complete!")
    else:
        print("Installation failed")
Project: hostapd-mana    Author: adde88    | project source | file source
def connectionMade(self):
        dst = path.abspath(path.join(self.destDir,self.filename))
        exists = path.exists(dst)
        if self.resume and exists:
            # I have been told I want to resume, and a file already
            # exists - Here we go
            self.file = open(dst, 'ab')
            log.msg("Attempting to resume %s - starting from %d bytes" %
                    (self.file, self.file.tell()))
        elif self.overwrite or not exists:
            self.file = open(dst, 'wb')
        else:
            raise OSError(errno.EEXIST,
                          "There's a file in the way.  "
                          "Perhaps that's why you cannot open it.",
                          dst)
Project: pytest-cython    Author: lgpage    | project source | file source
def install_python(version, arch, home):
    print("Installing Python", version, "for", arch, "bit architecture to", home)
    if exists(home):
        return

    path = download_python(version, arch)
    print("Installing", path, "to", home)
    success = False
    for cmd in INSTALL_CMD[version]:
        cmd = [part.format(home=home, path=path) for part in cmd]
        print("Running:", " ".join(cmd))
        try:
            check_call(cmd)
        except Exception as exc:
            print("Failed command", cmd, "with:", exc)
            if exists("install.log"):
                with open("install.log") as fh:
                    print(fh.read())
        else:
            success = True
    if success:
        print("Installation complete!")
    else:
        print("Installation failed")
Project: epubcheck    Author: titusz    | project source | file source
def install_python(version, arch, home):
    print("Installing Python", version, "for", arch, "bit architecture to", home)
    if exists(home):
        return

    path = download_python(version, arch)
    print("Installing", path, "to", home)
    success = False
    for cmd in INSTALL_CMD[version]:
        cmd = [part.format(home=home, path=path) for part in cmd]
        print("Running:", " ".join(cmd))
        try:
            check_call(cmd)
        except Exception as exc:
            print("Failed command", cmd, "with:", exc)
            if exists("install.log"):
                with open("install.log") as fh:
                    print(fh.read())
        else:
            success = True
    if success:
        print("Installation complete!")
    else:
        print("Installation failed")
Project: python-twelve-tone    Author: accraze    | project source | file source
def install_python(version, arch, home):
    print("Installing Python", version, "for", arch, "bit architecture to", home)
    if exists(home):
        return

    path = download_python(version, arch)
    print("Installing", path, "to", home)
    success = False
    for cmd in INSTALL_CMD[version]:
        cmd = [part.format(home=home, path=path) for part in cmd]
        print("Running:", " ".join(cmd))
        try:
            check_call(cmd)
        except CalledProcessError as exc:
            print("Failed command", cmd, "with:", exc)
            if exists("install.log"):
                with open("install.log") as fh:
                    print(fh.read())
        else:
            success = True
    if success:
        print("Installation complete!")
    else:
        print("Installation failed")
Project: PyPlanet    Author: PyPlanet    | project source | file source
def install_python(version, arch, home):
    print("Installing Python", version, "for", arch, "bit architecture to", home)
    if exists(home):
        return

    path = download_python(version, arch)
    print("Installing", path, "to", home)
    success = False
    for cmd in INSTALL_CMD[version]:
        cmd = [part.format(home=home, path=path) for part in cmd]
        print("Running:", " ".join(cmd))
        try:
            check_call(cmd)
        except Exception as exc:
            print("Failed command", cmd, "with:", exc)
            if exists("install.log"):
                with open("install.log") as fh:
                    print(fh.read())
        else:
            success = True
    if success:
        print("Installation complete!")
    else:
        print("Installation failed")
Project: zipline-chinese    Author: zhanghan1990    | project source | file source
def __init__(self, conn_or_path, calendar, daily_bar_reader,
                 overwrite=False):
        if isinstance(conn_or_path, sqlite3.Connection):
            self.conn = conn_or_path
        elif isinstance(conn_or_path, str):
            if overwrite and exists(conn_or_path):
                try:
                    remove(conn_or_path)
                except OSError as e:
                    if e.errno != ENOENT:
                        raise
            self.conn = sqlite3.connect(conn_or_path)
        else:
            raise TypeError("Unknown connection type %s" % type(conn_or_path))

        self._daily_bar_reader = daily_bar_reader
        self._calendar = calendar
Project: foremast    Author: gogoair    | project source | file source
def extract_formats(config_handle):
    """Get application formats.

    Args:
        config_handle (configparser.ConfigParser): Instance of configurations.

    Returns:
        dict: Formats in {$format_type: $format_pattern} form.
        See (gogoutils.Formats) for available options.
    """
    formats = {}

    if config_handle.has_section('formats'):
        formats = dict(config_handle['formats'])

    return formats
Project: HandDetection    Author: YunqiuXu    | project source | file source
def load_url_files(_dir, file_name_prefix):
    url_list = []

    ttl_url_list_file_name = osp.join(_dir, file_name_prefix +'_all.txt')
    if osp.exists(ttl_url_list_file_name):
        fp_urls = open(ttl_url_list_file_name, 'r')  # open the aggregated URL list file
        print 'load URLs from file: ' + ttl_url_list_file_name

        i = 0
        for line in fp_urls:
            line = line.strip()
            if len(line)>0:
                splits = line.split('\t')
                url_list.append(splits[0].strip())
                i=i+1

        print str(i) + ' URLs loaded'
        fp_urls.close()             
    else:
        url_list = load_all_url_files(_dir, file_name_prefix)

    return url_list
Project: cxflow-tensorflow    Author: Cognexa    | project source | file source
def test_keep_checkpoints(self):
        """
        Test if the checkpoints are kept.

        This is a regression test for issue #71 (TF ``Saver`` keeps only the last 5 checkpoints by default).
        """
        dummy_model = SimpleModel(dataset=None, log_dir=self.tmpdir, inputs=[], outputs=['output'])

        checkpoints = []
        for i in range(20):
            checkpoints.append(dummy_model.save(str(i)))

        for checkpoint in checkpoints:
            self.assertTrue(path.exists(checkpoint+'.index'))
            self.assertTrue(path.exists(checkpoint+'.meta'))
            data_prefix = path.basename(checkpoint)+'.data'
            data_files = [file for file in os.listdir(path.dirname(checkpoint)) if file.startswith(data_prefix)]
            self.assertGreater(len(data_files), 0)
Project: sc8pr    Author: dmaccarthy    | project source | file source
def run(self):
        # Convert PIL images to Video instance
        sk = self.sk
        vid = Video()
        vid.meta["frameRate"] = self.fps
        print("Converting...", file=stderr)
        n = len(self.frames)
        for f in self.frames:
            vid += sk.grab.image(0, img=f)
            n -= 1
            if n and n % 25 == 0: print(n, file=stderr)

        # Save Video as s8v file
        n = 1
        while exists(self.filename(n)): n += 1
        fn = self.filename(n)
        print("Saving '{}'...".format(fn), file=stderr)
        vid.save(fn)
        print("Done!", file=stderr)
Project: Projects    Author: it2school    | project source | file source
def initsysfonts_darwin():
    """read the fonts on OS X. X11 is required for this to work."""
    # if the X11 binary exists... try and use that.
    #  Not likely to be there on pre 10.4.x ...
    if exists("/usr/X11/bin/fc-list"):
        fonts = initsysfonts_unix("/usr/X11/bin/fc-list")
    # This fc-list path will work with the X11 from the OS X 10.3 installation
    # disc
    elif exists("/usr/X11R6/bin/fc-list"):
        fonts = initsysfonts_unix("/usr/X11R6/bin/fc-list")
    else:
        fonts = {}

    return fonts


# read the fonts on unix
Project: clouds-aws    Author: elias5000    | project source | file source
def load_parameters(stack):
    """load parameters from yaml file and return as dictionary"""
    params = []
    param_path = path.join('stacks', stack, 'parameters.yaml')

    if not path.exists(param_path):
        return params

    with open(param_path, encoding='utf-8') as file:
        params_raw = yaml.load(file.read())

        # build parameter dict
        for param in params_raw.keys():
            params.append({
                'ParameterKey': param,
                'ParameterValue': params_raw[param]
            })
    return params
Project: clouds-aws    Author: elias5000    | project source | file source
def save_parameters(stack, params):
    """saves parameters to disk"""
    # decode parameter dict
    params_dict = {}
    for param in params:
        params_dict[param['ParameterKey']] = param['ParameterValue']

    stack_dir = path.join('stacks', stack)
    param_path = path.join(stack_dir, 'parameters.yaml')

    # ensure paths are present
    if not path.exists('stacks'):
        mkdir('stacks')
    if not path.exists(stack_dir):
        mkdir(stack_dir)

    with open(param_path, mode='w', encoding='utf-8') as file:
        file.write(yaml.dump(params_dict, default_flow_style=False, explicit_start=True))
Project: clouds-aws    Author: elias5000    | project source | file source
def clone(args):
    """clone local stack"""
    # source and destination paths
    src = args.stack
    dst = args.new_stack

    # require stack to exist locally
    if src not in local_stacks():
        LOG.error("no such local stack: " + src)
        sys.exit(1)

    # overwrite of existing local stack requires force
    if not args.force and path.exists(path.join('stacks', dst)):
        LOG.warning('stack ' + dst + ' already exists, use --force to overwrite')
        sys.exit(1)

    save_template(dst, load_template(src))
    if path.exists(path.join('stacks', src, 'parameters.yaml')):
        save_parameters(dst, load_parameters(src))
Project: jack-matchmaker    Author: SpotlightKid    | project source | file source
def get_cards():
    """Get card info from /proc/asound/cards."""

    if not exists(PROC_CARDS):
        raise IOError("'%s' does not exist. ALSA not loaded?" % PROC_CARDS)

    with open(PROC_CARDS, 'r', encoding='utf-8') as procfile:
        # capture card number, id and name
        cardline = re.compile(
            r'^\s*(?P<num>\d+)\s*'  # card number
            r'\[(?P<id>\w+)\s*\]:'  # card ID
            r'.*-\s(?P<name>.*)$')  # card name

        for line in procfile:
            match = cardline.match(line)

            if match:
                yield AlsaCardInfo(card_num=int(match.group('num')),
                                   id=match.group('id').strip(),
                                   name=match.group('name').strip())
Project: jack-matchmaker    Author: SpotlightKid    | project source | file source
def get_pcm_devices():
    """Get PCM device numbers and names from /proc/asound/pcm."""

    if not exists(PROC_DEVICES):
        raise IOError("'%s' does not exist. ALSA not loaded?" % PROC_DEVICES)

    with open(PROC_DEVICES, 'r', encoding='utf-8') as procfile:
        devnum = re.compile(r'(?P<card_num>\d+)-(?P<dev_num>\d+)')

        for line in procfile:
            fields = [l.strip() for l in line.split(':')]

            if len(fields) >= 3:
                match = devnum.match(fields[0])

                if match:
                    yield AlsaPcmInfo(card_num=int(match.group('card_num')),
                                      dev_num=int(match.group('dev_num')),
                                      id=fields[1],
                                      name=fields[2],
                                      playback='playback 1' in fields,
                                      capture='capture 1' in fields)
Project: privcount    Author: privcount    | project source | file source
def handshake_secret_load(secret_handshake_path, create=False):
        '''
        Load and decode the base64 encoded secret handshake string from the
        file at secret_handshake_path.
        If create is true, and the file does not exist, create a new file
        containing a random, base64-encoded secret handshake string.
        '''
        # generate a new key if the file does not exist
        # having the protocol deal with config files is an abstraction layer
        # violation, but it yields better security properties, as only the
        # protocol layer ever knows the secret key (and it is discarded after
        # it is used to generate or verify the HMACs)
        if not path.exists(secret_handshake_path):
            secret_handshake = PrivCountProtocol.handshake_secret_generate()
            with open(secret_handshake_path, 'w') as fin:
                fin.write(secret_handshake)
        # read from the file (even if we just generated it)
        with open(secret_handshake_path, 'r') as fin:
            # read the whole file, but ignore whitespace
            secret_handshake = fin.read().strip()
        # decode
        secret_handshake = PrivCountProtocol.handshake_secret_verify(
            secret_handshake)
        return secret_handshake
Project: ssbio    Author: SBRG    | project source | file source
def download_sifts_xml(pdb_id, outdir='', outfile=''):
    """Download the SIFTS file for a PDB ID.

    Args:
        pdb_id:
        outdir:
        outfile:

    Returns:

    """
    baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
    filename = '{}.xml.gz'.format(pdb_id)

    if outfile:
        outfile = op.join(outdir, outfile)
    else:
        outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')

    if not op.exists(outfile):
        response = urlopen(baseURL + filename)
        with open(outfile, 'wb') as f:
            f.write(gzip.decompress(response.read()))

    return outfile
Project: ssbio    Author: SBRG    | project source | file source
def metadata_path(self, m_path):
        """Provide pointers to the paths of the metadata file

        Args:
            m_path: Path to metadata file

        """
        if not m_path:
            self.metadata_dir = None
            self.metadata_file = None

        else:
            if not op.exists(m_path):
                raise OSError('{}: file does not exist!'.format(m_path))

            if not op.dirname(m_path):
                self.metadata_dir = '.'
            else:
                self.metadata_dir = op.dirname(m_path)
            self.metadata_file = op.basename(m_path)

            # TODO: update using Biopython's built in SeqRecord parser
            # Just updating IDs and stuff
            self.update(parse_kegg_gene_metadata(self.metadata_path), overwrite=True)
Project: ssbio    Author: SBRG    | project source | file source
def test_write_gff_file(self, seqprop_with_i, tmpdir):
        """Test writing the features, and that features are now loaded from a file"""
        outpath = tmpdir.join('test_seqprop_with_i_write_gff_file.gff').strpath
        seqprop_with_i.write_gff_file(outfile=outpath, force_rerun=True)

        # Test that the file was written
        assert op.exists(outpath)
        assert op.getsize(outpath) > 0

        # Test that file paths are correct
        assert seqprop_with_i.feature_path == outpath
        assert seqprop_with_i.feature_file == 'test_seqprop_with_i_write_gff_file.gff'
        assert seqprop_with_i.feature_dir == tmpdir

        # Test that features cannot be changed
        with pytest.raises(ValueError):
            seqprop_with_i.features = ['NOFEATURES']
Project: ssbio    Author: SBRG    | project source | file source
def request_file(link, outfile, force_rerun_flag=False):
    """Download a file given a URL if the outfile does not exist already.

    Args:
        link (str): Link to download file.
        outfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does
            exist, unless force_rerun_flag is True.
        force_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.

    Returns:
        str: Path to downloaded file.

    """
    if force_rerun(flag=force_rerun_flag, outfile=outfile):
        req = requests.get(link)
        if req.status_code == 200:
            with open(outfile, 'w') as f:
                f.write(req.text)
            log.debug('Loaded and saved {} to {}'.format(link, outfile))
        else:
            log.error('{}: request error {}'.format(link, req.status_code))
    return outfile
Project: ssbio    Author: SBRG    | project source | file source
def root_dir(self, path):
        if not path:
            raise ValueError('No path specified')

        if not op.exists(path):
            raise ValueError('{}: folder does not exist'.format(path))

        if self._root_dir:
            log.info('Changing root directory of project "{}" from {} to {}'.format(self.id, self.root_dir, path))

            if not op.exists(op.join(path, self.id)):
                raise IOError('Project "{}" does not exist in folder {}'.format(self.id, path))
        else:
            log.info('Creating project directory in folder {}'.format(path))

        self._root_dir = path

        for d in [self.base_dir, self.model_dir, self.data_dir,
                  self.sequences_dir, self.sequences_by_gene_dir, self.sequences_by_organism_dir]:
            ssbio.utils.make_dir(d)

        log.info('{}: project location'.format(self.base_dir))
Project: ssbio    Author: SBRG    | project source | file source
def get_freesasa_annotations(self, include_hetatms=False, representatives_only=True, force_rerun=False):
        """Run freesasa on structures and store calculations.

        Annotations are stored in the protein structure's chain sequence at:
        ``<chain_prop>.seq_record.letter_annotations['*-freesasa']``

        Args:
            include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``.
            representatives_only (bool): If analysis should only be run on the representative structure
            force_rerun (bool): If calculations should be rerun even if an output file exists

        """
        for g in tqdm(self.genes):
            g.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
                                               representative_only=representatives_only,
                                               force_rerun=force_rerun)
Project: ssbio    Author: SBRG    | project source | file source
def metadata_path(self, m_path):
        """Provide pointers to the paths of the metadata file

        Args:
            m_path: Path to metadata file

        """
        if not m_path:
            self.metadata_dir = None
            self.metadata_file = None

        else:
            if not op.exists(m_path):
                raise OSError('{}: file does not exist!'.format(m_path))

            if not op.dirname(m_path):
                self.metadata_dir = '.'
            else:
                self.metadata_dir = op.dirname(m_path)
            self.metadata_file = op.basename(m_path)
Project: ssbio    Author: SBRG    | project source | file source
def feature_path(self, gff_path):
        """Load a GFF file with information on a single sequence and store features in the ``features`` attribute

        Args:
            gff_path: Path to GFF file.

        """
        if not gff_path:
            self.feature_dir = None
            self.feature_file = None

        else:
            if not op.exists(gff_path):
                raise OSError('{}: file does not exist!'.format(gff_path))

            if not op.dirname(gff_path):
                self.feature_dir = '.'
            else:
                self.feature_dir = op.dirname(gff_path)
            self.feature_file = op.basename(gff_path)
Project: ssbio    Author: SBRG    | project source | file source
def root_dir(self, path):
        if not path:
            raise ValueError('No path specified')

        if not op.exists(path):
            raise ValueError('{}: folder does not exist'.format(path))

        if self._root_dir:
            log.debug('Changing root directory of DOCK project for "{}" from {} to {}'.format(self.id, self.root_dir, path))

            if not op.exists(op.join(path, self.id)):
                raise IOError('DOCK project "{}" does not exist in folder {}'.format(self.id, path))

        self._root_dir = path

        for d in [self.dock_dir]:
            ssbio.utils.make_dir(d)
Project: ssbio    Author: SBRG    | project source | file source
def structure_path(self, path):
        """Provide pointers to the paths of the structure file

        Args:
            path: Path to structure file

        """
        if not path:
            self.structure_dir = None
            self.structure_file = None

        else:
            if not op.exists(path):
                raise OSError('{}: file does not exist!'.format(path))

            if not op.dirname(path):
                self.structure_dir = '.'
            else:
                self.structure_dir = op.dirname(path)
            self.structure_file = op.basename(path)
Project: ssbio    Author: SBRG    | project source | file source
def dms_maker(self, force_rerun=False):
        """Create surface representation (dms file) of receptor

        Args:
            force_rerun (bool): If method should be rerun even if output file exists

        """
        log.debug('{}: running surface representation maker...'.format(self.id))

        if not self.receptorpdb_path:
            raise ValueError('Please run protein_only_and_noH')

        dms = op.join(self.dock_dir, '{}_receptor.dms'.format(self.id))

        if ssbio.utils.force_rerun(flag=force_rerun, outfile=dms):
            cmd = 'dms {} -n -w 1.4 -o {}'.format(self.receptorpdb_path, dms)
            os.system(cmd)

        self.dms_path = dms

        if ssbio.utils.is_non_zero_file(dms):
            self.dms_path = dms
            log.debug('{}: successful dms execution'.format(self.dms_path))
        else:
            log.critical('{}: dms_maker failed to run on receptor file'.format(self.receptorpdb_path))
Project: BadParser    Author: stanojevic    | project source | file source
def train_and_tag_stanford(stanford_tagger_dir, fold_train, fold_test):
    tmp_dir = "tmp"
    if not exists(tmp_dir):
        mkdir(tmp_dir)

    train_fn = join(tmp_dir, "train")
    save_tagged_lines(fold_train, train_fn)
    test_fn = join(tmp_dir, "test")
    save_tagged_lines_without_tags(fold_test, test_fn)

    model_fn = join(tmp_dir, "model") # bidirectional5words
    train_cmd = "java -classpath %s/stanford-postagger.jar edu.stanford.nlp.tagger.maxent.MaxentTagger -arch %s -tagSeparator %s -nthreads 2  -sentenceDelimiter newline -tokenize false -model %s -trainFile %s"%(stanford_tagger_dir, architecture, separator, model_fn, train_fn)
    system(train_cmd)

    test_out_fn = join(tmp_dir, "test-tagged")
    test_cmd = "java -mx1300m -classpath %s/stanford-postagger.jar edu.stanford.nlp.tagger.maxent.MaxentTagger -preserveLines -sentenceDelimiter newline -tokenize false  -model %s -textFile %s > %s"%(stanford_tagger_dir, model_fn, test_fn, test_out_fn)
    system(test_cmd)

    return load_lines(test_out_fn)
Project: adversarial-frcnn    Author: xiaolonw    | project source | file source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_coco_annotation(index)
                    for index in self._image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)
        return gt_roidb
Project: Gnome-Authenticator    Author: bil-elmoussaoui    | project source | file source
def get_icon(image, size):
    """
        Generate a GdkPixbuf image
        :param image: icon name or image path
        :return: GdkPixbux Image
    """
    directory = path.join(env.get("DATA_DIR"), "applications", "images") + "/"
    theme = Gtk.IconTheme.get_default()
    if theme.has_icon(path.splitext(image)[0]):
        icon = theme.load_icon(path.splitext(image)[0], size, 0)
    elif path.exists(directory + image):
        icon = GdkPixbuf.Pixbuf.new_from_file(directory + image)
    elif path.exists(image):
        icon = GdkPixbuf.Pixbuf.new_from_file(image)
    else:
        icon = theme.load_icon("image-missing", size, 0)
    if icon.get_width() != size or icon.get_height() != size:
        icon = icon.scale_simple(size, size, GdkPixbuf.InterpType.BILINEAR)
    return icon
Project: Gnome-Authenticator    Author: bil-elmoussaoui    | project source | file source
def create_file(file_path):
    """
        Create a file and create parent folder if missing
    """
    if not (path.isfile(file_path) and path.exists(file_path)):
        dirs = file_path.split("/")
        i = 0
        while i < len(dirs) - 1:
            directory = "/".join(dirs[0:i + 1]).strip()
            if not path.exists(directory) and len(directory) != 0:
                makedirs(directory)
                logging.debug("Creating directory %s " % directory)
            i += 1
        mknod(file_path)
        return True
    else:
        return False
Project: search_google    Author: rrwen    | project source | file source
def download_links(self, dir_path):
    """Download web pages or images from search result links.

    Args:
      dir_path (str):
        Path of directory to save downloads of :class:`api.results`.links
    """
    links = self.links
    if not path.exists(dir_path):
      makedirs(dir_path)
    for i, url in enumerate(links):
      if 'start' in self.cseargs:
        i += int(self.cseargs['start'])
      ext = self.cseargs['fileType']
      ext = '.html' if ext == '' else '.' + ext
      file_name = self.cseargs['q'].replace(' ', '_') + '_' + str(i) + ext
      file_path = path.join(dir_path, file_name)
      r = requests.get(url, stream=True)
      if r.status_code == 200:
        with open(file_path, 'wb') as f:
          r.raw.decode_content = True
          shutil.copyfileobj(r.raw, f)
Project: sw-delta-python    Author: alexcasalboni    | project source | file source
def get_delta(asked_file_path, cached_file_path):
    """ Return a tuple of (body, mime_type), given two file paths """

    if not exists(asked_file_path):
        raise InvalidAskedFile("%s not found." % asked_file_path)

    if not exists(cached_file_path):
        raise InvalidCachedFile("%s not found." % cached_file_path)

    with open(asked_file_path, 'r') as filep:
        asked_file_string = filep.read()

    with open(cached_file_path, 'r') as filep:
        cached_file_string = filep.read()

    body = calculate_delta(asked_file_string, cached_file_string)
    mime_type = "text/sw-delta"

    if len(body) > len(asked_file_string):
        body = asked_file_string
        mime_type, _ = guess_type(asked_file_path)

    return body, mime_type
Project: YoutubeTV    Author: dude56987    | project source | file source
def setProtected(self,name):
        '''
        Set a name in the table to be protected from removal
        because of limits.
        '''
        # generate the filepath to the protected values
        # list
        filePath=pathJoin(self.path,'protected.table')
        # check if the path exists
        if pathExists(filePath):
            # read the protected list from the file
            protectedList=unpickle(loadFile(filePath))
        else:
            # create the list and append the name
            protectedList=[]
        # append the new value to the list
        protectedList.append(name)
        # pickle the protected list for storage
        protectedList=pickle(protectedList)
        # write the changes back to the protected list
        writeFile(filePath,protectedList)
    ################################################################################
Project: YoutubeTV    Author: dude56987    | project source | file source
def loadValue(self,name):
        '''
        Loads a saved value and returns it.
        '''
        # find the file path in the names array
        if name in self.names:
            filePath=self.namePaths[name]
        else:
            return False
        # check if the path exists
        if pathExists(filePath):
            # load the data
            fileData=loadFile(filePath)
        else:
            # return false if the value does not exist
            return False
        # unpickle the filedata
        fileData = unpickle(fileData)
        debug.add('loading value '+str(name),fileData)
        # returns the value of a table stored on disk
        return fileData
    ################################################################################
Project: YoutubeTV    Author: dude56987    | project source | file source
def deleteValue(self,name):
        '''
        Delete a value with name name.
        '''
        # log which value is being deleted
        debug.add('deleting value ',name)
        # figure out the path to the named value file
        if name in self.names:
            filePath=self.namePaths[name]
            # remove the metadata entry
            del self.namePaths[name]
            # write changes to database metadata file
            writeFile(pathJoin(self.path,'names.table'),pickle(self.namePaths))
            # update the length and names attributes
            self.names=self.namePaths.keys()
            self.length=len(self.names)
        else:
            return False
        if pathExists(filePath):
            # remove the file accocated with the value
            removeFile(filePath)
            return True
        else:
            return False
################################################################################
Project: sublime-text-3-packages    Author: nickjj    | project source | file source
def clear_cache(force = False):
  """
  If the folder exists, and has more than 5MB of icons in the cache, delete
  it to clear all the icons then recreate it.
  """
  from os.path import getsize, join, isfile, exists
  from os import makedirs, listdir
  from sublime import cache_path
  from shutil import rmtree

  # The icon cache path
  icon_path = join(cache_path(), "GutterColor")

  # The maximum amount of space to take up
  limit = 5242880 # 5 MB

  if exists(icon_path):
    size = sum(getsize(join(icon_path, f)) for f in listdir(icon_path) if isfile(join(icon_path, f)))
    if force or (size > limit): rmtree(icon_path)

  if not exists(icon_path): makedirs(icon_path)
Project: eurovision-country-selector    Author: mikejarrett    | project source | file source
def get_countries_from_csv(filepath):
    """ Process a csv countaining country data.

    Args:
        filepath (str): A path to a csv file.

    Returns:
        list: sanitized country names as strings.

    Raises:
        OSError: If file does not exist at the filepath.
    """
    if not path.exists(filepath):
        raise OSError('Path to file: "{}" does not exist.'.format(filepath))

    with open(filepath, 'r') as csv_file:
        reader = csv.reader(csv_file)
        return [sanitize_string(''.join(row)) for row in reader]
Project: pi-topPULSE    Author: pi-top    | project source | file source
def save(file_path, overwrite=False):
    """Saves recorded audio to a file."""

    global _temp_file_path

    if _thread_running is False:
        if _temp_file_path != "" and path.exists(_temp_file_path):
            if path.exists(file_path) is False or overwrite is True:

                if path.exists(file_path):
                    remove(file_path)

                rename(_temp_file_path, file_path)
                _temp_file_path = ""

            else:
                PTLogger.info("File already exists")
        else:
            PTLogger.info("No recorded audio data found")
    else:
        PTLogger.info("Microphone is still recording!")
Project: faster-rcnn-resnet    Author: Eniac-Xie    | project source | file source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_coco_annotation(index)
                    for index in self._image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)
        return gt_roidb
Project: DistanceGAN    Author: sagiebenaim    | project source | file source
def download_celeb_a(dirpath):
    data_dir = 'celebA'
    if os.path.exists(os.path.join(dirpath, data_dir)):
        print('Found Celeb-A - skip')
        return
    url = 'https://drive.google.com/open?id=0B7EVK8r0v71pZjFTYXZWM3FlRnM'
    filepath = download(url, dirpath)
    zip_dir = ''
    with zipfile.ZipFile(filepath) as zf:
        zip_dir = zf.namelist()[0]
        zf.extractall(dirpath)
    os.remove(filepath)
    os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))

    attribute_url = 'https://www.dropbox.com/sh/8oqt9vytwxb3s4r/AAB06FXaQRUNtjW9ntaoPGvCa?dl=0'
    filepath = download(attribute_url, dirpath)
Project: NarshaTech    Author: KimJangHyeon    | project source | file source
def handle_template(self, template, subdir):
        """
        Determines where the app or project templates are.
        Use django.__path__[0] as the default because we don't
        know into which directory Django has been installed.
        """
        if template is None:
            return path.join(django.__path__[0], 'conf', subdir)
        else:
            if template.startswith('file://'):
                template = template[7:]
            expanded_template = path.expanduser(template)
            expanded_template = path.normpath(expanded_template)
            if path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = path.abspath(expanded_template)
            if path.exists(absolute_path):
                return self.extract(absolute_path)

        raise CommandError("couldn't handle %s template %s." %
                           (self.app_or_project, template))
Project: hostapd-mana    Author: adde88    | project source | file source
def listdir(self, fil=None, sort=None):
        """ list directory contents, possibly filter by the given fil func
            and possibly sorted.
        """
        if fil is None and sort is None:
            names = py.error.checked_call(os.listdir, self.strpath)
            return map_as_list(self._fastjoin, names)
        if isinstance(fil, py.builtin._basestring):
            if not self._patternchars.intersection(fil):
                child = self._fastjoin(fil)
                if exists(child.strpath):
                    return [child]
                return []
            fil = common.FNMatcher(fil)
        names = py.error.checked_call(os.listdir, self.strpath)
        res = []
        for name in names:
            child = self._fastjoin(name)
            if fil is None or fil(child):
                res.append(child)
        self._sortlist(res, sort)
        return res
Project: hostapd-mana    Author: adde88    | project source | file source
def siblingExtensionSearch(self, *exts):
        """Attempt to return a path with my name, given multiple possible
        extensions.

        Each extension in exts will be tested and the first path which exists
        will be returned.  If no path exists, None will be returned.  If '' is
        in exts, then if the file referred to by this path exists, 'self' will
        be returned.

        The extension '*' has a magic meaning, which means "any path that
        begins with self.path+'.' is acceptable".
        """
        p = self.path
        for ext in exts:
            if not ext and self.exists():
                return self
            if ext == '*':
                basedot = basename(p)+'.'
                for fn in listdir(dirname(p)):
                    if fn.startswith(basedot):
                        return self.clonePath(joinpath(dirname(p), fn))
            p2 = p + ext
            if exists(p2):
                return self.clonePath(p2)
Project: d-tailor    Author: jcg    | project source | file source
def analyze_duplex_mfe(filename,region = None):
    data = {}

    chdir(project_dir)

    if path.exists(project_dir+"/tmp/structures/"+filename+".ct"):
        try:
            res = check_output(["./3rdParty/unafold/ct-energy" , "tmp/structures/" + filename + ".ct"])

            if res != "":
                data['RNADuplexMFE'] = float(str(res).rstrip())
            else:
                data['RNADuplexMFE'] = 'NA'

        except NameError:
            data['RNADuplexMFE'] = 'NA'

        #system("rm tmp/structures/%s*" % filename)
    else:
        data['RNADuplexMFE'] = 'NA'

    return data
Project: d-tailor    Author: jcg    | project source | file source
def analyze_duplex_ds(filename,seq1 = "", region = None):
    data = {}   

    chdir(project_dir)

    if region == None:
        if path.exists(project_dir+"/tmp/structures/"+filename+".ct"):            
            output_ds = Popen("perl 3rdParty/unafold/ss-count.pl tmp/structures/" + filename + ".ct | awk /[[:digit:]][[:blank:]]0/'{print $1}'", stdout=PIPE, shell=True).stdout.read()
            output = output_ds.split()
            data['RNADuplexDoubleStrandedBasesList_Mol1'] = [eval(k) for k in output if eval(k) <= len(seq1)]
            data['RNADuplexDoubleStrandedBases_Mol1'] = len(data['RNADuplexDoubleStrandedBasesList_Mol1'])
            data['RNADuplexDoubleStrandedBasesList_Mol2'] = [eval(k)-len(seq1) for k in output if eval(k) > len(seq1)]
            data['RNADuplexDoubleStrandedBases_Mol2'] = len(data['RNADuplexDoubleStrandedBasesList_Mol2'])
        else:
            data['RNADuplexDoubleStrandedBasesList_Mol1'] = 'NA'
            data['RNADuplexDoubleStrandedBases_Mol1'] = 'NA'
            data['RNADuplexDoubleStrandedBasesList_Mol2'] = 'NA'
            data['RNADuplexDoubleStrandedBases_Mol2'] = 'NA'

    #else:
    # TODO: get just DS bases from one region of the rna structure

    return data
Project: d-tailor    Author: jcg    | project source | file source
def analyze_duplex_ss(filename,seq1 = "",region = None):
    data = {}       

    chdir(project_dir)

    if region == None:
        if path.exists(project_dir+"/tmp/structures/"+filename+".ct"):
            output_ss = Popen("perl 3rdParty/unafold/ss-count.pl tmp/structures/" + filename + ".ct | awk /[[:digit:]][[:blank:]]1/'{print $1}'", stdout=PIPE, shell=True).stdout.read()                    
            output = output_ss.split()
            data['RNADuplexSingleStrandedBasesList_Mol1'] = [eval(k) for k in output if eval(k) <= len(seq1)]
            data['RNADuplexSingleStrandedBases_Mol1'] = len(data['RNADuplexSingleStrandedBasesList_Mol1'])
            data['RNADuplexSingleStrandedBasesList_Mol2'] = [eval(k)-len(seq1) for k in output if eval(k) > len(seq1)]
            data['RNADuplexSingleStrandedBases_Mol2'] = len(data['RNADuplexSingleStrandedBasesList_Mol2'])
        else:
            data['RNADuplexSingleStrandedBasesList_Mol1'] = 'NA'
            data['RNADuplexSingleStrandedBases_Mol1'] = 'NA'
            data['RNADuplexSingleStrandedBasesList_Mol2'] = 'NA'
            data['RNADuplexSingleStrandedBases_Mol2'] = 'NA'
    #else:
    # TODO: get just DS bases from one region of the rna structure

    return data