Python resource module: getrlimit() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use resource.getrlimit().
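
The examples below all assume a Unix-like platform, since the resource module is not available on Windows. As a quick orientation, here is a minimal standalone sketch of the basic call pattern (not taken from any of the projects below):

import resource

# getrlimit() returns a (soft, hard) tuple; resource.RLIM_INFINITY marks an unlimited value.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print("open-file limit: soft=%s hard=%s" % (soft, hard))

# An unprivileged process may raise its soft limit up to (but not beyond) the hard limit.
if hard != resource.RLIM_INFINITY and soft < hard:
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))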

Project: cellranger    Author: 10XGenomics    | project source | file source
def merge(input_bams, output_bam, threads=1):
    ''' Merge the sorted bam chunks hierarchically to conserve open file handles '''
    soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    soft -= 100

    tmp_dir = os.path.dirname(output_bam)
    while len(input_bams) > 1:
        new_bams = []
        for i in range(0, len(input_bams), soft):
            bam_chunk = input_bams[i:i+soft]
            if len(bam_chunk) > 1:
                new_bam = os.path.join(tmp_dir, "%d-%d.bam" % (i, len(input_bams)))
                tk_bam.merge(new_bam, bam_chunk, threads)
                new_bams.append(new_bam)
            else:
                new_bams.append(input_bams[i])
        input_bams = new_bams
    cr_utils.move(input_bams[0], output_bam)
Project: cellranger    Author: 10XGenomics    | project source | file source
def check_open_fh():
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if 0 <= hard and hard < tk_constants.MIN_PROCESS_NOFILE:
        return False, "On machine: %s, process open file handle hard limit (%d) is less than %d. Please run 'ulimit -n %d' before restarting the pipeline." % (
            socket.gethostname(), hard, tk_constants.MIN_PROCESS_NOFILE, tk_constants.MIN_PROCESS_NOFILE)

    if not os.path.exists(tk_constants.GLOBAL_NOFILE_PATH):
        return False, "On machine: %s, %s does not exist." % (socket.gethostname(), tk_constants.GLOBAL_NOFILE_PATH)
    with open(tk_constants.GLOBAL_NOFILE_PATH) as f:
        glob_str = f.read().strip()
    if not glob_str.isdigit():
        return False, "On machine: %s, %s contains a non-integer global open file handle limit: %s." % (
            socket.gethostname(), tk_constants.GLOBAL_NOFILE_PATH, glob_str)

    glob = int(glob_str)
    if glob < tk_constants.MIN_GLOBAL_NOFILE:
        return False, "On machine: %s, global open file handle limit (%d) is less than %d. Please set the global file handle limit to %d before restarting the pipeline." % (
            socket.gethostname(), glob, tk_constants.MIN_GLOBAL_NOFILE, tk_constants.MIN_GLOBAL_NOFILE)
    return True, None
Project: NordVPN-NetworkManager    Author: Chadsr    | project source | file source
def get_num_processes(num_servers):
    # Since each process is not resource heavy and simply takes time waiting for pings, maximise the number of processes (within constraints of the current configuration)

    # Maximum open file descriptors of current configuration
    soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)

    # Find how many file descriptors are already in use by the parent process
    ppid = os.getppid()
    used_file_descriptors = int(subprocess.run('ls -l /proc/' + str(ppid) + '/fd | wc -l', shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8'))

    # Max processes is the number of file descriptors left before the soft limit (configuration maximum) is reached
    max_processes = int((soft_limit - used_file_descriptors) / 2)

    if num_servers > max_processes:
        return max_processes
    else:
        return num_servers
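
Note that the snippet above shells out to "ls ... | wc -l" to count the parent process's open descriptors. A pure-Python sketch of the same count (an illustration only, assuming a Linux-style /proc filesystem; count_open_fds is not part of the project) could be:

import os

def count_open_fds(pid=None):
    # Count the entries under /proc/<pid>/fd; defaults to the current process.
    pid = pid if pid is not None else os.getpid()
    return len(os.listdir('/proc/%d/fd' % pid))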
Project: pyShadowsocks    Author: FTwOoO    | project source | file source
def set_open_file_limit_up_to(limit=65536):
    import settings

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    limit = max(soft, limit)
    limit = min(limit, hard)

    while limit > soft:
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
            break
        except ValueError:
            limit -= 256
        except:
            settings.CONFIG_LOG.exception('unexpected exception')

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    settings.CONFIG_LOG.info('open file limit set to %d:%d', soft, hard)
    return (soft, hard)
Project: bitmask-dev    Author: leapcode    | project source | file source
def prevent_core_dump():
    """ Prevent this process from generating a core dump.

        :return: ``None``.

        Set the soft and hard limits for core dump size to zero. On Unix,
        this entirely prevents the process from creating a core dump.

        """
    core_resource = resource.RLIMIT_CORE

    try:
        # Ensure the resource limit exists on this platform, by requesting
        # its current value.
        core_limit_prev = resource.getrlimit(core_resource)
    except ValueError as exc:
        error = DaemonOSEnvironmentError(
            "System does not support RLIMIT_CORE resource limit"
            " ({exc})".format(exc=exc))
        raise error

    # Set hard and soft limits to zero, i.e. no core dump at all.
    core_limit = (0, 0)
    resource.setrlimit(core_resource, core_limit)
Project: bitmask-dev    Author: leapcode    | project source | file source
def get_maximum_file_descriptors():
    """ Get the maximum number of open file descriptors for this process.

        :return: The number (integer) to use as the maximum number of open
            files for this process.

        The maximum is the process hard resource limit of maximum number of
        open file descriptors. If the limit is “infinity”, a default value
        of ``MAXFD`` is returned.

        """
    limits = resource.getrlimit(resource.RLIMIT_NOFILE)
    result = limits[1]
    if result == resource.RLIM_INFINITY:
        result = MAXFD
    return result
Project: qinling    Author: openstack    | project source | file source
def main(number=1024, **kwargs):
    for name, desc in [
        ('RLIMIT_NOFILE', 'number of open files'),
    ]:
        limit_num = getattr(resource, name)
        soft, hard = resource.getrlimit(limit_num)
        print('Maximum %-25s (%-15s) : %20s %20s' % (desc, name, soft, hard))

    files = []

    try:
        for i in range(0, number):
            files.append(_create_file(i))
    finally:
        for f in files:
            f.close()
Project: qinling    Author: openstack    | project source | file source
def main(number=128, **kwargs):
    for name, desc in [
        ('RLIMIT_NPROC', 'number of processes'),
    ]:
        limit_num = getattr(resource, name)
        soft, hard = resource.getrlimit(limit_num)
        print('Maximum %-25s (%-15s) : %20s %20s' % (desc, name, soft, hard))

    processes = []

    for i in range(0, number):
        p = Process(
            target=_sleep,
            args=(i,)
        )
        p.start()
        processes.append(p)

    for p in processes:
        p.join()
Project: zippy    Author: securesystemslab    | project source | file source
def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                pass

        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                    'com.apple.CrashReporter', 'DialogType'],
                    stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print("this tests triggers the Crash Reporter, "
                      "that is intentional", end='')
                sys.stdout.flush()
Project: respeaker_virtualenv    Author: respeaker    | project source | file source
def test_rlimit_get(self):
        import resource
        p = psutil.Process(os.getpid())
        names = [x for x in dir(psutil) if x.startswith('RLIMIT')]
        assert names, names
        for name in names:
            value = getattr(psutil, name)
            self.assertGreaterEqual(value, 0)
            if name in dir(resource):
                self.assertEqual(value, getattr(resource, name))
                # XXX - On PyPy RLIM_INFINITY returned by
                # resource.getrlimit() is reported as a very big long
                # number instead of -1. It looks like a bug with PyPy.
                if PYPY:
                    continue
                self.assertEqual(p.rlimit(value), resource.getrlimit(value))
            else:
                ret = p.rlimit(value)
                self.assertEqual(len(ret), 2)
                self.assertGreaterEqual(ret[0], -1)
                self.assertGreaterEqual(ret[1], -1)
Project: oil    Author: oilshell    | project source | file source
def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
Project: oil    Author: oilshell    | project source | file source
def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                pass

        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                    'com.apple.CrashReporter', 'DialogType'],
                    stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print "this tests triggers the Crash Reporter, that is intentional"
                sys.stdout.flush()
Project: python2-tracer    Author: extremecoders-re    | project source | file source
def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
Project: python2-tracer    Author: extremecoders-re    | project source | file source
def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                pass

        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                    'com.apple.CrashReporter', 'DialogType'],
                    stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print "this tests triggers the Crash Reporter, that is intentional"
                sys.stdout.flush()
Project: web_ctp    Author: molebot    | project source | file source
def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
Project: web_ctp    Author: molebot    | project source | file source
def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                pass

        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                    'com.apple.CrashReporter', 'DialogType'],
                    stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print("this tests triggers the Crash Reporter, "
                      "that is intentional", end='')
                sys.stdout.flush()
Project: pefile.pypy    Author: cloudtracer    | project source | file source
def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
Project: pefile.pypy    Author: cloudtracer    | project source | file source
def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                pass

        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                    'com.apple.CrashReporter', 'DialogType'],
                    stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print "this tests triggers the Crash Reporter, that is intentional"
                sys.stdout.flush()
Project: ouroboros    Author: pybee    | project source | file source
def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
Project: ndk-python    Author: gittor    | project source | file source
def __enter__(self):
        """Try to save previous ulimit, then set it to (0, 0)."""
        if resource is not None:
            try:
                self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
                resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
            except (ValueError, resource.error):
                pass

        if sys.platform == 'darwin':
            # Check if the 'Crash Reporter' on OSX was configured
            # in 'Developer' mode and warn that it will get triggered
            # when it is.
            #
            # This assumes that this context manager is used in tests
            # that might trigger the next manager.
            value = subprocess.Popen(['/usr/bin/defaults', 'read',
                    'com.apple.CrashReporter', 'DialogType'],
                    stdout=subprocess.PIPE).communicate()[0]
            if value.strip() == b'developer':
                print "this tests triggers the Crash Reporter, that is intentional"
                sys.stdout.flush()
Project: Chromium_DepotTools    Author: p07r0457    | project source | file source
def setup_limit(self):
        """set up the process limit"""
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
                                    self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU, cpu_limit)
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory) )
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                as_limit = (int(self.max_memory), self._old_max_memory[1])
                setrlimit(RLIMIT_AS, as_limit)
                self._msentinel.start()
        self._limit_set += 1
Project: node-gn    Author: Shouqun    | project source | file source
def setup_limit(self):
        """set up the process limit"""
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
                                    self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU, cpu_limit)
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory) )
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                as_limit = (int(self.max_memory), self._old_max_memory[1])
                setrlimit(RLIMIT_AS, as_limit)
                self._msentinel.start()
        self._limit_set += 1
Project: zenchmarks    Author: squeaky-pl    | project source | file source
def _fallbackFDImplementation(self):
        """
        Fallback implementation where either the resource module can inform us
        about the upper bound of how many FDs to expect, or where we just guess
        a constant maximum if there is no resource module.

        All possible file descriptors from 0 to that upper bound are returned
        with no attempt to exclude invalid file descriptor values.
        """
        try:
            import resource
        except ImportError:
            maxfds = 1024
        else:
            # OS-X reports 9223372036854775808. That's a lot of fds to close.
            # OS-X should get the /dev/fd implementation instead, so mostly
            # this check probably isn't necessary.
            maxfds = min(1024, resource.getrlimit(resource.RLIMIT_NOFILE)[1])
        return range(maxfds)
Project: zenchmarks    Author: squeaky-pl    | project source | file source
def test_rlimit_get(self):
        import resource
        p = psutil.Process(os.getpid())
        names = [x for x in dir(psutil) if x.startswith('RLIMIT')]
        assert names, names
        for name in names:
            value = getattr(psutil, name)
            self.assertGreaterEqual(value, 0)
            if name in dir(resource):
                self.assertEqual(value, getattr(resource, name))
                # XXX - On PyPy RLIM_INFINITY returned by
                # resource.getrlimit() is reported as a very big long
                # number instead of -1. It looks like a bug with PyPy.
                if PYPY:
                    continue
                self.assertEqual(p.rlimit(value), resource.getrlimit(value))
            else:
                ret = p.rlimit(value)
                self.assertEqual(len(ret), 2)
                self.assertGreaterEqual(ret[0], -1)
                self.assertGreaterEqual(ret[1], -1)
Project: FancyWord    Author: EastonLee    | project source | file source
def test_rlimit_get(self):
        import resource
        p = psutil.Process(os.getpid())
        names = [x for x in dir(psutil) if x.startswith('RLIMIT')]
        assert names, names
        for name in names:
            value = getattr(psutil, name)
            self.assertGreaterEqual(value, 0)
            if name in dir(resource):
                self.assertEqual(value, getattr(resource, name))
                # XXX - On PyPy RLIM_INFINITY returned by
                # resource.getrlimit() is reported as a very big long
                # number instead of -1. It looks like a bug with PyPy.
                if PYPY:
                    continue
                self.assertEqual(p.rlimit(value), resource.getrlimit(value))
            else:
                ret = p.rlimit(value)
                self.assertEqual(len(ret), 2)
                self.assertGreaterEqual(ret[0], -1)
                self.assertGreaterEqual(ret[1], -1)
Project: kbe_server    Author: xiaohaoppy    | project source | file source
def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
Project: depot_tools    Author: webrtc-uwp    | project source | file source
def setup_limit(self):
        """set up the process limit"""
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
                                    self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU, cpu_limit)
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory) )
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                as_limit = (int(self.max_memory), self._old_max_memory[1])
                setrlimit(RLIMIT_AS, as_limit)
                self._msentinel.start()
        self._limit_set += 1
Project: wuye.vim    Author: zhaoyingnan911    | project source | file source
def setup_limit(self):
        """set up the process limit"""
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
                                    self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU, cpu_limit)
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory) )
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                as_limit = (int(self.max_memory), self._old_max_memory[1])
                setrlimit(RLIMIT_AS, as_limit)
                self._msentinel.start()
        self._limit_set += 1
Project: amadash    Author: ipartola    | project source | file source
def close_open_files():
    '''Closes all open files. Useful after a fork.'''

    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD

    for fd in reversed(range(maxfd)):
        try:
            os.close(fd)
        except OSError, e:
            if e.errno == errno.EBADF:
                pass # File not open
            else:
                raise Exception("Failed to close file descriptor %d: %s" % (fd, e))
Project: cellranger    Author: 10XGenomics    | project source | file source
def config_max_files(self):
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        self.maxfiles = soft - 100
Project: tracer    Author: angr    | project source | file source
def _setup_env(self):
        prefix = "/tmp/tracer_"
        curdir = os.getcwd()
        tmpdir = tempfile.mkdtemp(prefix=prefix)
        # allow cores to be dumped
        saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
        resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
        binaries_old = [ ]
        for binary in self._binaries:
            binaries_old.append(binary)

        binary_replacements = [ ]
        for i, binary in enumerate(self._binaries):
            binary_replacements.append(os.path.join(tmpdir,"binary_replacement_%d" % i))

        for binary_o, binary_r in zip(binaries_old, binary_replacements):
            shutil.copy(binary_o, binary_r)

        self._binaries = binary_replacements
        if self.argv is not None and not self.is_multicb:
            self.argv = self._binaries + self.argv[1:]
        os.chdir(tmpdir)
        try:
            yield (tmpdir,binary_replacements)
        finally:
            assert tmpdir.startswith(prefix)
            shutil.rmtree(tmpdir)
            os.chdir(curdir)
            resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
            self._binaries = binaries_old
Project: hostapd-mana    Author: adde88    | project source | file source
def setUp(self):
        self.openSockets = []
        if resource is not None:
            self.originalFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (128, self.originalFileLimit[1]))
            self.socketLimit = 256
Project: hostapd-mana    Author: adde88    | project source | file source
def tearDown(self):
        while self.openSockets:
            self.openSockets.pop().close()
        if resource is not None:
            # OS X implicitly lowers the hard limit in the setrlimit call
            # above.  Retrieve the new hard limit to pass in to this
            # setrlimit call, so that it doesn't give us a permission denied
            # error.
            currentHardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
            newSoftLimit = min(self.originalFileLimit[0], currentHardLimit)
            resource.setrlimit(resource.RLIMIT_NOFILE, (newSoftLimit, currentHardLimit))
Project: annotated-py-asyncio    Author: hhstore    | project source | file source
def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            NUM_FDS = min(hard, 2**16)
        except (OSError, ValueError):
            NUM_FDS = soft

        # guard for already allocated FDs (stdin, stdout...)
        NUM_FDS -= 32

        s = self.SELECTOR()
        self.addCleanup(s.close)

        for i in range(NUM_FDS // 2):
            try:
                rd, wr = self.make_socketpair()
            except OSError:
                # too many FDs, skip - note that we should only catch EMFILE
                # here, but apparently *BSD and Solaris can fail upon connect()
                # or bind() with EADDRNOTAVAIL, so let's be safe
                self.skipTest("FD limit reached")

            try:
                s.register(rd, selectors.EVENT_READ)
                s.register(wr, selectors.EVENT_WRITE)
            except OSError as e:
                if e.errno == errno.ENOSPC:
                    # this can be raised by epoll if we go over
                    # fs.epoll.max_user_watches sysctl
                    self.skipTest("FD limit reached")
                raise

        self.assertEqual(NUM_FDS // 2, len(s.select()))
Project: flasky    Author: RoseOu    | project source | file source
def get_maxfd():
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    return maxfd
Project: selectors2    Author: SethMichaelLarson    | project source | file source
def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if hard == resource.RLIM_INFINITY:
            self.skipTest("RLIMIT_NOFILE is infinite")

        try:  # If we're on a *BSD system, the limit tag is different.
            _, bsd_hard = resource.getrlimit(resource.RLIMIT_OFILE)
            if bsd_hard == resource.RLIM_INFINITY:
                self.skipTest("RLIMIT_OFILE is infinite")
            if bsd_hard < hard:
                hard = bsd_hard

        # NOTE: AttributeError resource.RLIMIT_OFILE is not defined on Mac OS.
        except (OSError, resource.error, AttributeError):
            pass

        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            limit_nofile = min(hard, 2 ** 16)
        except (OSError, ValueError):
            limit_nofile = soft

        # Guard against already allocated FDs
        limit_nofile -= 256
        limit_nofile = max(0, limit_nofile)

        s = self.make_selector()

        for i in range(limit_nofile // 2):
            rd, wr = self.make_socketpair()
            s.register(rd, selectors2.EVENT_READ)
            s.register(wr, selectors2.EVENT_WRITE)

        self.assertEqual(limit_nofile // 2, len(s.select()))
Project: pov_fuzzing    Author: mechaphish    | project source | file source
def _setup_env(self):
        prefix = "/dev/shm/tracer_"
        curdir = os.getcwd()
        tmpdir = tempfile.mkdtemp(prefix=prefix)
        # dont prefilter the core
        if len(self.binaries) > 1:
            with open("/proc/self/coredump_filter", "wb") as f:
                f.write("00000077")

        # allow cores to be dumped
        saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
        resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
        binaries_old = [ ]
        for binary in self.binaries:
            binaries_old.append(os.path.abspath(binary))

        self.binaries = list(binaries_old)

        os.chdir(tmpdir)

        try:
            yield (tmpdir, self.binaries[0])

        finally:
            assert tmpdir.startswith(prefix)
            shutil.rmtree(tmpdir)
            os.chdir(curdir)
            resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
            self.binaries = binaries_old
Project: python-cookbook-3rd    Author: tuanavu    | project source | file source
def set_max_runtime(seconds):
    # Install the signal handler and set a resource limit
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard))
    signal.signal(signal.SIGXCPU, time_exceeded)
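
The snippet above installs a SIGXCPU handler named time_exceeded that is not shown on this page. A minimal self-contained sketch of the same pattern (the handler body here is an assumed illustration, not the project's code) might be:

import resource
import signal
import sys

def time_exceeded(signum, frame):
    # The kernel delivers SIGXCPU once the soft CPU-time limit is exceeded.
    print("CPU time limit exceeded", file=sys.stderr)
    raise SystemExit(1)

def set_max_runtime(seconds):
    # Lower only the soft CPU-time limit, keep the hard limit, then install the handler.
    soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard))
    signal.signal(signal.SIGXCPU, time_exceeded)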
Project: landscape-client    Author: CanonicalLtd    | project source | file source
def clean_fds():
    """Close all non-stdio file descriptors.

    This should be called at the beginning of a program to avoid inheriting any
    unwanted file descriptors from the invoking process.  Unfortunately, this
    is really common in unix!
    """
    rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    total_descriptors = min(4096, rlimit_nofile)
    for fd in range(3, total_descriptors):
        try:
            os.close(fd)
        except OSError:
            pass
Project: python-sensor    Author: instana    | project source | file source
def collect_snapshot(self):
        try:
            if self.sensor.service_name:
                appname = self.sensor.service_name
            elif "FLASK_APP" in os.environ:
                appname = os.environ["FLASK_APP"]
            elif "DJANGO_SETTINGS_MODULE" in os.environ:
                appname = os.environ["DJANGO_SETTINGS_MODULE"].split('.')[0]
            else:
                appname = os.path.basename(sys.argv[0])

            s = Snapshot(name=appname,
                         version=sys.version,
                         rlimit_core=resource.getrlimit(resource.RLIMIT_CORE),
                         rlimit_cpu=resource.getrlimit(resource.RLIMIT_CPU),
                         rlimit_fsize=resource.getrlimit(
                             resource.RLIMIT_FSIZE),
                         rlimit_data=resource.getrlimit(resource.RLIMIT_DATA),
                         rlimit_stack=resource.getrlimit(
                             resource.RLIMIT_STACK),
                         rlimit_rss=resource.getrlimit(resource.RLIMIT_RSS),
                         rlimit_nproc=resource.getrlimit(
                             resource.RLIMIT_NPROC),
                         rlimit_nofile=resource.getrlimit(
                             resource.RLIMIT_NOFILE),
                         rlimit_memlock=resource.getrlimit(
                             resource.RLIMIT_MEMLOCK),
                         rlimit_as=resource.getrlimit(resource.RLIMIT_AS),
                         versions=self.collect_modules())

            return s
        except Exception as e:
            log.debug("collect_snapshot: ", str(e))

            return None
Project: chihu    Author: yelongyu    | project source | file source
def get_maxfd():
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    return maxfd
Project: backup    Author: twindb    | project source | file source
def set_open_files_limit():
    """Detect maximum supported number of open file and set it"""
    max_files = getrlimit(RLIMIT_NOFILE)[0]
    while True:
        try:
            setrlimit(RLIMIT_NOFILE, (max_files, max_files))
            max_files += 1
        except ValueError:
            break
    LOG.debug('Setting max files limit to %d', max_files)
Project: taf    Author: taf3    | project source | file source
def main():
    # use number of open files soft limit and num cores to determinate Popen limit
    # use lesser of 4 * num cores or half max open files - 10
    default_parallel_limit = os.sysconf('SC_NPROCESSORS_ONLN') * 4
    parallel_limit = globals().get("parallel_limit", None)
    if parallel_limit is None:
        parallel_limit = default_parallel_limit
    parallel_limit = min(parallel_limit, (resource.getrlimit(resource.RLIMIT_NOFILE)[0] - 20) / 2)
    cmd_list = globals()["cmd_list"]
    results = list(multicall(cmd_list, parallel_limit))
    json.dump(results, sys.stdout)
Project: Price-Comparator    Author: Thejas-1    | project source | file source
def get_maxfd():
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    return maxfd
Project: cocrawler    Author: cocrawler    | project source | file source
def limit_resources():
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))  # XXX compare to max threads etc.

    _, hard = resource.getrlimit(resource.RLIMIT_AS)  # RLIMIT_VMEM does not exist?!
    resource.setrlimit(resource.RLIMIT_AS, (16 * 1024 * 1024 * 1024, hard))  # XXX config
Project: tabmaster    Author: NicolasMinghetti    | project source | file source
def get_maxfd():
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    return maxfd
Project: sslstrip-hsts-openwrt    Author: adde88    | project source | file source
def setUp(self):
        self.openSockets = []
        if resource is not None:
            self.originalFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (128, self.originalFileLimit[1]))
            self.socketLimit = 256
Project: sslstrip-hsts-openwrt    Author: adde88    | project source | file source
def tearDown(self):
        while self.openSockets:
            self.openSockets.pop().close()
        if resource is not None:
            # OS X implicitly lowers the hard limit in the setrlimit call
            # above.  Retrieve the new hard limit to pass in to this
            # setrlimit call, so that it doesn't give us a permission denied
            # error.
            currentHardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
            newSoftLimit = min(self.originalFileLimit[0], currentHardLimit)
            resource.setrlimit(resource.RLIMIT_NOFILE, (newSoftLimit, currentHardLimit))
Project: Cayenne-Agent    Author: myDevicesIoT    | project source | file source
def setMemoryLimit(rsrc, megs = 200):
    size = megs * 1048576
    soft, hard = getrlimit(rsrc)
    setrlimit(rsrc, (size, hard))  # cap the soft limit at `size` bytes (megs megabytes); keep the hard limit
    soft, hard = getrlimit(rsrc)
    info ('Limit changed to :'+ str( soft))
Project: Cayenne-Agent    Author: myDevicesIoT    | project source | file source
def SetMemoryLimits():
        try:
            from resource import getrlimit, setrlimit, RLIMIT_AS
            soft, hard = getrlimit(RLIMIT_AS)
            setrlimit(RLIMIT_AS, (hard, hard))
            soft, hard = getrlimit(RLIMIT_AS)
        except:
            pass