Python os.path module: curdir example source code

We extracted the following 21 code examples from open-source Python projects to illustrate how os.path.curdir is used.
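
Before the project excerpts, a minimal sketch of the constant itself: os.path.curdir is the platform's name for the current directory ('.' on both POSIX and Windows, a plain string rather than a function), and the abspath idiom below is the pattern most of the excerpts rely on.

import os.path as op

print(op.curdir)                                     # '.', the relative name of the current directory
print(op.abspath(op.curdir))                         # absolute path of the current working directory
print(op.join(op.abspath(op.curdir), 'output.txt'))  # a file path anchored at the working directory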

Project: code    Author: ActiveState
def run(self):
        ind=self.qu.get()
        url=self.url+str(ind)
        soup =bs.BeautifulSoup(''.join( ul.urlopen(url).readlines() ))
        bu = up.urlsplit(self.url)
        print 'started with the ' ,str(url).split('/')[-1],
        for i in  soup.find_all(attrs = { "class" : "recipe-title"}):
            sp = up.urlsplit(i.a.get('href'))
            path = sp.path
            print path
            if re.search(pat, path):
                path = bu.scheme+'://'+bu.netloc+path
                filename = str(path).split('/')[-2]
                filename = op.join(op.abspath(op.curdir),filename+'.py') # recipe will be stored in given location
#                filename = op.join(op.abspath(op.curdir),filename+'.html')
# uncomment the above line if downloading the web page for the recipe
                print path
                self.q.put((path,filename))
        self.fetch_data()
        time.sleep(1)
        self.qu.task_done()
        self.q.join()
        print 'done with the ' ,str(url).split('/')[-1],
Project: crc-diagram    Author: IuryAlves
def test_use_extension_from_format(self):
        crcs = [
            CRC(
                kind='class',
                name='Enrollment',
                responsibilities=['Get students',
                                  'Get seminar', 'Get Final Grade'],
                collaborators=['Seminar']
            ),
        ]

        DotRender(crcs, format='svg').render('crc')

        self.assertTrue(exists(join(abspath(curdir), 'crc.svg')))

        remove('crc.svg')
        remove('crc')
Project: channel-pruning    Author: yihui-he
def resnet(n=3, num_output = 16):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""    
    net_name = "resnet-"    
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, num_output=num_output)
    builder.write(folder=pt_folder)
Project: channel-pruning    Author: yihui-he
def resnet_orth(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""    
    net_name = "resnet-orth-"    
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, orth=True)
    builder.write(folder=pt_folder)
Project: channel-pruning    Author: yihui-he
def resnet_orth_v2(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""    
    net_name = "resnet-orth-v2"    
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    if n > 18:
        # warm up
        solver = Solver(solver_name="solver_warm.prototxt", folder=pt_folder, lr_policy=Solver.policy.fixed)
        solver.p.base_lr = 0.01
        solver.set_max_iter(500)
        solver.write()
        del solver

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.resnet_cifar(n, orth=True, v2=True)
    builder.write(folder=pt_folder)
Project: decoding_challenge_cortana_2016_3rd    Author: kingjr
def test_save():
    """ Test saving raw"""
    tempdir = _TempDir()
    raw = Raw(fif_fname, preload=False)
    # can't write over file being read
    assert_raises(ValueError, raw.save, fif_fname)
    raw = Raw(fif_fname, preload=True)
    # can't overwrite file without overwrite=True
    assert_raises(IOError, raw.save, fif_fname)

    # test abspath support and annotations
    annot = Annotations([10], [10], ['test'], raw.info['meas_date'])
    raw.annotations = annot
    new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
    raw.save(op.join(tempdir, new_fname), overwrite=True)
    new_raw = Raw(op.join(tempdir, new_fname), preload=False)
    assert_raises(ValueError, new_raw.save, new_fname)
    assert_array_equal(annot.onset, new_raw.annotations.onset)
    assert_array_equal(annot.duration, new_raw.annotations.duration)
    assert_array_equal(annot.description, new_raw.annotations.description)
    assert_equal(annot.orig_time, new_raw.annotations.orig_time)
    # make sure we can overwrite the file we loaded when preload=True
    new_raw = Raw(op.join(tempdir, new_fname), preload=True)
    new_raw.save(op.join(tempdir, new_fname), overwrite=True)
    os.remove(new_fname)
Project: pyTSon_plugins    Author: Bluscream
def on_selectpath_clicked(self):
        dialog = QFileDialog()
        dialog.setFileMode(QFileDialog.Directory)
        dialog.setOption(QFileDialog.ShowDirsOnly)
        result = dialog.getExistingDirectory(self, 'Choose Directory', path.curdir)
        if result: self.imgpath.setText(result)
Project: EasyClangComplete    Author: niosus
def run_command(command, shell=True, cwd=path.curdir, env=environ):
        """Run a generic command in a subprocess.

        Args:
            command (str or list): command to run
            shell (bool): whether to pass the command through the shell
            cwd (str): working directory for the command, defaults to path.curdir
            env (dict): environment for the subprocess, defaults to environ

        Returns:
            str: raw command output
        """
        try:
            stdin = None
            startupinfo = None
            if isinstance(command, list):
                command = subprocess.list2cmdline(command)
                log.debug("running command: \n%s", command)
            if sublime.platform() == "windows":
                # Don't let console window pop-up briefly.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                stdin = subprocess.PIPE
            output = subprocess.check_output(command,
                                             stdin=stdin,
                                             stderr=subprocess.STDOUT,
                                             shell=shell,
                                             cwd=cwd,
                                             env=env,
                                             startupinfo=startupinfo)
            output_text = ''.join(map(chr, output))
        except subprocess.CalledProcessError as e:
            output_text = e.output.decode("utf-8")
            log.debug("command finished with code: %s", e.returncode)
            log.debug("command output: \n%s", output_text)
        return output_text
Project: channel-pruning    Author: yihui-he
def plain(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""
    net_name = "plain"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, num_output = 16)
    builder.write(folder=pt_folder)
Project: channel-pruning    Author: yihui-he
def plain_orth(n=3):
    """6n+2, n=3 5 7 9 18 coresponds to 20 56 110 layers"""
    net_name = "plain-orth"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, orth=True)
    builder.write(folder=pt_folder)
Project: channel-pruning    Author: yihui-he
def acc(n=3):
    """6n+2, n=3 9 18 coresponds to 20 56 110 layers"""
    net_name = "plain"
    pt_folder = osp.join(osp.abspath(osp.curdir), net_name +str(6*n+2))
    name = net_name+str(6*n+2)+'-cifar10'

    solver = Solver(folder=pt_folder)
    solver.write()
    del solver

    builder = Net(name)
    builder.Data('cifar-10-batches-py/train', phase='TRAIN', crop_size=32)
    builder.Data('cifar-10-batches-py/test', phase='TEST')
    builder.plain_cifar(n, num_output = 16, inplace=False)
    builder.write(folder=pt_folder)
Project: niceman    Author: ReproNim
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_niceman=False, dirs=False):
    """Generator to find files matching regex

    Parameters
    ----------
    regex: basestring
    exclude: basestring, optional
      Matches to exclude
    exclude_vcs:
      If True, excludes commonly known VCS subdirectories.  If string, used
      as regex to exclude those files (regex: `%r`)
    exclude_niceman:
      If True, excludes files known to be niceman meta-data files (e.g. under
      .niceman/ subdirectory) (regex: `%r`)
    topdir: basestring, optional
      Directory where to search
    dirs: bool, optional
      Whether to match directories as well as files
    """

    for dirpath, dirnames, filenames in os.walk(topdir):
        names = (dirnames + filenames) if dirs else filenames
        # TODO: might want to uniformize on windows to use '/'
        paths = (opj(dirpath, name) for name in names)
        for path in filter(re.compile(regex).search, paths):
            path = path.rstrip(dirsep)
            if exclude and re.search(exclude, path):
                continue
            if exclude_vcs and re.search(_VCS_REGEX, path):
                continue
            if exclude_niceman and re.search(_NICEMAN_REGEX, path):
                continue
            yield path
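
A hypothetical call against the generator above, assuming the module-level names the excerpt relies on (curdir imported from os.path, opj as an alias for os.path.join, dirsep for os.sep, and the _VCS_REGEX / _NICEMAN_REGEX constants) are defined as in the rest of the niceman utilities. Because topdir defaults to curdir, this walks the current directory:

# list every .py file below the current directory, skipping VCS folders
for path in find_files(r'\.py$', exclude_vcs=True):
    print(path)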
Project: niceman    Author: ReproNim
def is_explicit_path(path):
    """Return whether a path explicitly points to a location

    Any absolute path, or relative path starting with either '../' or
    './' is assumed to indicate a location on the filesystem. Any other
    path format is not considered explicit."""
    path = expandpath(path, force_absolute=False)
    return isabs(path) \
        or path.startswith(os.curdir + os.sep) \
        or path.startswith(os.pardir + os.sep)
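
A short illustration of the checks is_explicit_path performs, rewritten against plain os.path so it runs on its own (expandpath is a niceman helper; the sketch assumes POSIX separators and paths that expandpath would leave unchanged):

import os
from os.path import isabs

for p in ('/etc/hosts', './data', '../data', 'data/file.txt'):
    explicit = (isabs(p)
                or p.startswith(os.curdir + os.sep)
                or p.startswith(os.pardir + os.sep))
    print(p, '->', explicit)  # True, True, True, False on POSIX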
Project: mne-hcp    Author: mne-tools
def read_raw(subject, data_type, run_index=0, hcp_path=op.curdir,
             verbose=None):
    """Read HCP raw data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
        'noise_empty_room'
        'noise_subject'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    raw : instance of mne.io.Raw
        The MNE raw object.
    """
    pdf, config = get_file_paths(
        subject=subject, data_type=data_type, output='raw',
        run_index=run_index, hcp_path=hcp_path)

    raw = _read_raw_bti(pdf, config, convert=False, verbose=verbose)
    return raw
Project: mne-hcp    Author: mne-tools
def read_trial_info(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read information about trials

    Parameters
    ----------
    subject : str
        The HCP subject.
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    Returns
    -------
    trial_info : dict
        The trial info including event labels, indices and times.
    """

    trial_info_mat_fname = get_file_paths(
        subject=subject, data_type=data_type,
        output='trial_info', run_index=run_index,
        hcp_path=hcp_path)[0]

    trl_info = _read_trial_info(trial_info_mat_fname=trial_info_mat_fname)
    return trl_info
Project: mne-hcp    Author: mne-tools
def read_evokeds(subject, data_type, onset='stim', sensor_mode='mag',
                     hcp_path=op.curdir, kind='average'):
    """Read HCP processed data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    onset : {'stim', 'resp'}
        The event onset. The mapping is generous, everything that is not a
        response is a stimulus, in the sense of internal or external events.
    sensor_mode : {'mag', 'planar'}
        The sensor projection. Defaults to 'mag'. Only relevant for
        evoked output.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    kind : {'average', 'standard_error'}
        The averaging mode. Defaults to 'average'.
    Returns
    -------
    evokeds : list of mne.Evoked
        The MNE Evoked objects read for the requested data type, onset
        and averaging mode.
    """
    info = read_info(subject=subject, data_type=data_type,
                         hcp_path=hcp_path, run_index=0)

    evoked_files = list()
    for fname in get_file_paths(
            subject=subject, data_type=data_type, onset=onset,
            output='evoked', sensor_mode=sensor_mode, hcp_path=hcp_path):
        evoked_files.extend(_read_evoked(fname, sensor_mode, info, kind))
    return evoked_files
Project: calmjs    Author: calmjs
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """
    Given cmd, check where it is on PATH.

    Loosely based on the version in python 3.3.
    """

    if path is None:
        path = os.environ.get('PATH', defpath)
    if not path:
        return None

    paths = path.split(pathsep)

    if sys.platform == 'win32':
        # oh boy
        if curdir not in paths:
            paths = [curdir] + paths

        # also need to check the fileexts...
        pathext = os.environ.get('PATHEXT', '').split(pathsep)

        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # sanity
        files = [cmd]

    seen = set()
    for p in paths:
        normpath = normcase(p)
        if normpath in seen:
            continue
        seen.add(normpath)
        for f in files:
            fn = os.path.join(p, f)
            if os.path.isfile(fn) and os.access(fn, mode):
                return fn

    return None
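
A usage sketch for which(). The excerpt omits its import block; the assumption here is that defpath, pathsep, curdir and normcase come from os and os.path, which is what the function body implies:

import os
import sys
from os import defpath
from os.path import curdir, normcase, pathsep

print(which('python'))  # e.g. '/usr/bin/python', or None if not found on PATH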
Project: lbry-android    Author: lbryio
def prepare_build_dir(self):

        if argv_contains('--private') and not argv_contains('--launcher'):
            print('WARNING: Received --private argument when this would '
                  'normally be generated automatically.')
            print('         This is probably bad unless you meant to do '
                  'that.')

        bdist_dir = 'build/bdist.android-{}'.format(self.arch)
        if exists(bdist_dir):
            rmtree(bdist_dir)
        makedirs(bdist_dir)

        globs = []
        for directory, patterns in self.distribution.package_data.items():
            for pattern in patterns:
                globs.append(join(directory, pattern))

        filens = []
        for pattern in globs:
            filens.extend(glob(pattern))

        main_py_dirs = []
        if not argv_contains('--launcher'):
            for filen in filens:
                new_dir = join(bdist_dir, dirname(filen))
                if not exists(new_dir):
                    makedirs(new_dir)
                print('Including {}'.format(filen))
                copyfile(filen, join(bdist_dir, filen))
                if basename(filen) in ('main.py', 'main.pyo'):
                    main_py_dirs.append(filen)

        # This feels ridiculous, but how else to define the main.py dir?
        # Maybe should just fail?
        if not main_py_dirs and not argv_contains('--launcher'):
            print('ERROR: Could not find main.py, so no app build dir defined')
            print('You should name your app entry point main.py')
            exit(1)
        if len(main_py_dirs) > 1:
            print('WARNING: Multiple main.py dirs found, using the shortest path')
        main_py_dirs = sorted(main_py_dirs, key=lambda j: len(split(j)))

        if not argv_contains('--launcher'):
            sys.argv.append('--private={}'.format(
                join(realpath(curdir), bdist_dir, dirname(main_py_dirs[0])))
            )
Project: mne-hcp    Author: mne-tools
def read_info(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read info from unprocessed data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
        'noise_empty_room'
        'noise_subject'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    info : instance of mne.io.meas_info.Info
        The MNE channel info object.

    .. note::
        HCP MEG delivers only 3 of the 5 task packages available from MRI HCP.
    """
    raw, config = get_file_paths(
        subject=subject, data_type=data_type, output='raw',
        run_index=run_index, hcp_path=hcp_path)

    if not op.exists(raw):
        raw = None

    meg_info = _read_bti_info(raw, config)

    if raw is None:
        logger.info('Did not find Raw data. Guessing EMG, ECG and EOG '
                    'channels')
        rename_channels(meg_info, dict(_label_mapping))
    return meg_info
Project: mne-hcp    Author: mne-tools
def read_epochs(subject, data_type, onset='stim', run_index=0,
                    hcp_path=op.curdir, return_fixations_motor=False):
    """Read HCP processed data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    onset : {'stim', 'resp', 'sentence', 'block'}
        The event onset. The mapping is generous, everything that is not a
        response is a stimulus, in the sense of internal or external events.
        `sentence` and `block` are specific to task_story_math.
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.
    return_fixations_motor : bool
        Whether to return fixations or regular trials. For motor data only.
        Defaults to False.
    Returns
    -------
    epochs : instance of mne.Epochs
        The MNE epochs. Note, these are pseudo-epochs in the case of
        onset == 'rest'.
    """
    info = read_info(subject=subject, data_type=data_type,
                         run_index=run_index, hcp_path=hcp_path)

    epochs_mat_fname = get_file_paths(
        subject=subject, data_type=data_type, output='epochs',
        onset=onset,
        run_index=run_index, hcp_path=hcp_path)[0]

    if data_type != 'task_motor':
        return_fixations_motor = None
    epochs = _read_epochs(epochs_mat_fname=epochs_mat_fname, info=info,
                          return_fixations_motor=return_fixations_motor)
    if data_type == 'task_motor':
        epochs.set_channel_types(
            {ch: 'emg' for ch in epochs.ch_names if 'EMG' in ch})
    return epochs
Project: mne-hcp    Author: mne-tools
def read_annot(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read annotations for bad data and ICA.

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    out : dict
        The annotations.
    """
    bads_files = get_file_paths(
        subject=subject, data_type=data_type,
        output='bads', run_index=run_index,
        hcp_path=hcp_path)
    segments_fname = [k for k in bads_files if
                      k.endswith('baddata_badsegments.txt')][0]
    bads_fname = [k for k in bads_files if
                  k.endswith('baddata_badchannels.txt')][0]

    ica_files = get_file_paths(
        subject=subject, data_type=data_type,
        output='ica', run_index=run_index,
        hcp_path=hcp_path)
    ica_fname = [k for k in ica_files if k.endswith('icaclass_vs.txt')][0]

    out = dict()
    iter_fun = [
        ('channels', _parse_annotations_bad_channels, bads_fname),
        ('segments', _parse_annotations_segments, segments_fname),
        ('ica', _parse_annotations_ica, ica_fname)]

    for subtype, fun, fname in iter_fun:
        with open(fname, 'r') as fid:
            out[subtype] = fun(fid.read())

    return out