Python resource module: RLIMIT_NOFILE example source code

The following 12 code examples, extracted from open-source Python projects, illustrate how to use resource.RLIMIT_NOFILE.
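
Before diving into the project examples, here is a minimal sketch of the underlying API (an illustration, not taken from any of the projects below): resource.getrlimit returns a (soft, hard) pair for a given limit, and resource.setrlimit lets an unprivileged process raise its soft limit up to, but not beyond, the hard ceiling.

import resource

# Query the current soft and hard limits on open file descriptors.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

# Raise the soft limit to the hard ceiling. setrlimit raises ValueError
# (or OSError on some platforms) when the requested values are not
# permitted, e.g. when exceeding the hard limit without privileges.
try:
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except (ValueError, OSError):
    pass  # keep the existing limits if the kernel refuses the raise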

Project: landscape-client    Author: CanonicalLtd    | project source | file source
def test_clean_fds_sanity(self):
        """
        If the process limit for file descriptors is very high (> 4096), then
        we only close 4096 file descriptors.
        """
        closed_fds = []

        with patch("os.close", side_effect=closed_fds.append) as close_mock:
            with self.mock_getrlimit(4100) as getrlimit_mock:
                clean_fds()

        getrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE)

        expected_fds = list(range(3, 4096))
        calls = [call(i) for i in expected_fds]
        close_mock.assert_has_calls(calls, any_order=True)

        self.assertEqual(closed_fds, expected_fds)
Project: landscape-client    Author: CanonicalLtd    | project source | file source
def test_ignore_OSErrors(self):
        """
        If os.close raises an OSError, it is ignored and we continue to close
        the rest of the FDs.
        """
        closed_fds = []

        def remember_and_throw(fd):
            closed_fds.append(fd)
            raise OSError("Bad FD!")

        with patch("os.close", side_effect=remember_and_throw) as close_mock:
            with self.mock_getrlimit(10) as getrlimit_mock:
                clean_fds()

        getrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE)
        expected_fds = list(range(3, 10))
        calls = [call(i) for i in expected_fds]
        close_mock.assert_has_calls(calls, any_order=True)
        self.assertEqual(closed_fds, expected_fds)
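
Both landscape-client tests above exercise a clean_fds helper that is not reproduced on this page. From the assertions one can infer its behaviour: close descriptors from 3 upward, scan at most 4096 of them even when RLIMIT_NOFILE is higher, and swallow OSError from os.close. A minimal sketch under those assumptions (the mocked tests do not reveal whether the real helper reads the soft or the hard limit; this sketch uses the soft value):

import os
import resource

def clean_fds():
    """Close all non-stdio file descriptors, scanning at most 4096 of them."""
    limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
    for fd in range(3, min(limit, 4096)):
        try:
            os.close(fd)
        except OSError:
            pass  # fd was not open, or close failed; keep going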
Project: annotated-py-asyncio    Author: hhstore    | project source | file source
def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            NUM_FDS = min(hard, 2**16)
        except (OSError, ValueError):
            NUM_FDS = soft

        # guard for already allocated FDs (stdin, stdout...)
        NUM_FDS -= 32

        s = self.SELECTOR()
        self.addCleanup(s.close)

        for i in range(NUM_FDS // 2):
            try:
                rd, wr = self.make_socketpair()
            except OSError:
                # too many FDs, skip - note that we should only catch EMFILE
                # here, but apparently *BSD and Solaris can fail upon connect()
                # or bind() with EADDRNOTAVAIL, so let's be safe
                self.skipTest("FD limit reached")

            try:
                s.register(rd, selectors.EVENT_READ)
                s.register(wr, selectors.EVENT_WRITE)
            except OSError as e:
                if e.errno == errno.ENOSPC:
                    # this can be raised by epoll if we go over
                    # fs.epoll.max_user_watches sysctl
                    self.skipTest("FD limit reached")
                raise

        self.assertEqual(NUM_FDS // 2, len(s.select()))
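
The comment in the loop above notes that, strictly speaking, only EMFILE ("too many open files") should be caught. A standalone sketch of that narrower guard:

import errno
import socket

def try_socketpair():
    """Return a connected socket pair, or None once the FD limit is hit."""
    try:
        return socket.socketpair()
    except OSError as e:
        if e.errno != errno.EMFILE:
            raise  # anything other than "too many open files" is a real error
        return None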
Project: selectors2    Author: SethMichaelLarson    | project source | file source
def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if hard == resource.RLIM_INFINITY:
            self.skipTest("RLIMIT_NOFILE is infinite")

        try:  # If we're on a *BSD system, the limit tag is different.
            _, bsd_hard = resource.getrlimit(resource.RLIMIT_OFILE)
            if bsd_hard == resource.RLIM_INFINITY:
                self.skipTest("RLIMIT_OFILE is infinite")
            if bsd_hard < hard:
                hard = bsd_hard

        # NOTE: AttributeError is raised when resource.RLIMIT_OFILE is not
        # defined, e.g. on Mac OS.
        except (OSError, resource.error, AttributeError):
            pass

        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            limit_nofile = min(hard, 2 ** 16)
        except (OSError, ValueError):
            limit_nofile = soft

        # Guard against already allocated FDs
        limit_nofile -= 256
        limit_nofile = max(0, limit_nofile)

        s = self.make_selector()

        for i in range(limit_nofile // 2):
            rd, wr = self.make_socketpair()
            s.register(rd, selectors2.EVENT_READ)
            s.register(wr, selectors2.EVENT_WRITE)

        self.assertEqual(limit_nofile // 2, len(s.select()))
Project: landscape-client    Author: CanonicalLtd    | project source | file source
def test_clean_fds_rlimit(self, close_mock):
        """
        L{clean_fds} cleans all non-stdio file descriptors up to the process
        limit for file descriptors.
        """
        with self.mock_getrlimit(10) as getrlimit_mock:
            clean_fds()

        calls = [call(i) for i in range(3, 10)]
        close_mock.assert_has_calls(calls, any_order=True)
        getrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE)
Project: python-sensor    Author: instana    | project source | file source
def collect_snapshot(self):
        try:
            if self.sensor.service_name:
                appname = self.sensor.service_name
            elif "FLASK_APP" in os.environ:
                appname = os.environ["FLASK_APP"]
            elif "DJANGO_SETTINGS_MODULE" in os.environ:
                appname = os.environ["DJANGO_SETTINGS_MODULE"].split('.')[0]
            else:
                appname = os.path.basename(sys.argv[0])

            s = Snapshot(name=appname,
                         version=sys.version,
                         rlimit_core=resource.getrlimit(resource.RLIMIT_CORE),
                         rlimit_cpu=resource.getrlimit(resource.RLIMIT_CPU),
                         rlimit_fsize=resource.getrlimit(
                             resource.RLIMIT_FSIZE),
                         rlimit_data=resource.getrlimit(resource.RLIMIT_DATA),
                         rlimit_stack=resource.getrlimit(
                             resource.RLIMIT_STACK),
                         rlimit_rss=resource.getrlimit(resource.RLIMIT_RSS),
                         rlimit_nproc=resource.getrlimit(
                             resource.RLIMIT_NPROC),
                         rlimit_nofile=resource.getrlimit(
                             resource.RLIMIT_NOFILE),
                         rlimit_memlock=resource.getrlimit(
                             resource.RLIMIT_MEMLOCK),
                         rlimit_as=resource.getrlimit(resource.RLIMIT_AS),
                         versions=self.collect_modules())

            return s
        except Exception as e:
            log.debug("collect_snapshot: ", str(e))

            return None
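
A generic variant of the snapshot above (a sketch, not part of the python-sensor source) collects every RLIMIT_* constant the platform exposes instead of naming each one explicitly:

import resource

def collect_rlimits():
    """Map each RLIMIT_* name defined on this platform to its (soft, hard) pair."""
    return {
        name: resource.getrlimit(getattr(resource, name))
        for name in dir(resource)
        if name.startswith("RLIMIT_")
    }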
Project: backup    Author: twindb    | project source | file source
def set_open_files_limit():
    """Detect maximum supported number of open file and set it"""
    max_files = getrlimit(RLIMIT_NOFILE)[0]
    while True:
        try:
            setrlimit(RLIMIT_NOFILE, (max_files, max_files))
            max_files += 1
        except ValueError:
            # setrlimit rejected the value; the last successful call
            # used max_files - 1.
            max_files -= 1
            break
    LOG.debug('Setting max files limit to %d', max_files)
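
The loop above raises the limit one descriptor at a time. A more direct variant (a sketch, not from the twindb source; the fallback parameter is made up for illustration) reads the hard ceiling and applies it in a single call:

from resource import RLIMIT_NOFILE, RLIM_INFINITY, getrlimit, setrlimit

def set_open_files_limit_direct(fallback=4096):
    """Raise the soft NOFILE limit to the hard ceiling in one call."""
    soft, hard = getrlimit(RLIMIT_NOFILE)
    # If the hard limit is unlimited, fall back to a finite cap.
    target = hard if hard != RLIM_INFINITY else fallback
    try:
        setrlimit(RLIMIT_NOFILE, (target, hard))
    except (ValueError, OSError):
        target = soft  # keep the current limit if the kernel refuses
    return target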
Project: ouroboros    Author: pybee    | project source | file source
def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            NUM_FDS = min(hard, 2**16)
        except (OSError, ValueError):
            NUM_FDS = soft

        # guard for already allocated FDs (stdin, stdout...)
        NUM_FDS -= 32

        s = self.SELECTOR()
        self.addCleanup(s.close)

        for i in range(NUM_FDS // 2):
            try:
                rd, wr = self.make_socketpair()
            except OSError:
                # too many FDs, skip - note that we should only catch EMFILE
                # here, but apparently *BSD and Solaris can fail upon connect()
                # or bind() with EADDRNOTAVAIL, so let's be safe
                self.skipTest("FD limit reached")

            try:
                s.register(rd, selectors.EVENT_READ)
                s.register(wr, selectors.EVENT_WRITE)
            except OSError as e:
                if e.errno == errno.ENOSPC:
                    # this can be raised by epoll if we go over
                    # fs.epoll.max_user_watches sysctl
                    self.skipTest("FD limit reached")
                raise

        self.assertEqual(NUM_FDS // 2, len(s.select()))
Project: kbe_server    Author: xiaohaoppy    | project source | file source
def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            NUM_FDS = min(hard, 2**16)
        except (OSError, ValueError):
            NUM_FDS = soft

        # guard for already allocated FDs (stdin, stdout...)
        NUM_FDS -= 32

        s = self.SELECTOR()
        self.addCleanup(s.close)

        for i in range(NUM_FDS // 2):
            try:
                rd, wr = self.make_socketpair()
            except OSError:
                # too many FDs, skip - note that we should only catch EMFILE
                # here, but apparently *BSD and Solaris can fail upon connect()
                # or bind() with EADDRNOTAVAIL, so let's be safe
                self.skipTest("FD limit reached")

            try:
                s.register(rd, selectors.EVENT_READ)
                s.register(wr, selectors.EVENT_WRITE)
            except OSError as e:
                if e.errno == errno.ENOSPC:
                    # this can be raised by epoll if we go over
                    # fs.epoll.max_user_watches sysctl
                    self.skipTest("FD limit reached")
                raise

        self.assertEqual(NUM_FDS // 2, len(s.select()))
Project: manticore    Author: trailofbits    | project source | file source
def get_open_fds(self):
        fds = []
        # resource.RLIMIT_NOFILE is the constant that *names* the limit,
        # not its value; getrlimit must be called to obtain the actual
        # ceiling (the original code iterated up to the constant itself).
        soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        for fd in range(3, soft):
            try:
                fcntl.fcntl(fd, fcntl.F_GETFD)  # succeeds only for open fds
            except IOError:
                continue
            fds.append(fd)
        return fds
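
On Linux, probing every possible descriptor can be avoided entirely: /proc/self/fd lists exactly the open ones. A sketch of that alternative (not from the manticore source):

import os

def get_open_fds_proc():
    """Linux-only: list open file descriptors via /proc/self/fd."""
    # The descriptor opened to read the directory itself shows up in
    # the listing; callers can filter it out if that matters.
    return sorted(int(name) for name in os.listdir("/proc/self/fd"))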
Project: containernet    Author: containernet    | project source | file source
def fixLimits():
    "Fix ridiculously small resource limits."
    debug( "*** Setting resource limits\n" )
    try:
        rlimitTestAndSet( RLIMIT_NPROC, 8192 )
        rlimitTestAndSet( RLIMIT_NOFILE, 16384 )
        #Increase open file limit
        sysctlTestAndSet( 'fs.file-max', 10000 )
        #Increase network buffer space
        sysctlTestAndSet( 'net.core.wmem_max', 16777216 )
        sysctlTestAndSet( 'net.core.rmem_max', 16777216 )
        sysctlTestAndSet( 'net.ipv4.tcp_rmem', '10240 87380 16777216' )
        sysctlTestAndSet( 'net.ipv4.tcp_wmem', '10240 87380 16777216' )
        sysctlTestAndSet( 'net.core.netdev_max_backlog', 5000 )
        #Increase arp cache size
        sysctlTestAndSet( 'net.ipv4.neigh.default.gc_thresh1', 4096 )
        sysctlTestAndSet( 'net.ipv4.neigh.default.gc_thresh2', 8192 )
        sysctlTestAndSet( 'net.ipv4.neigh.default.gc_thresh3', 16384 )
        #Increase routing table size
        sysctlTestAndSet( 'net.ipv4.route.max_size', 32768 )
        #Increase number of PTYs for nodes
        sysctlTestAndSet( 'kernel.pty.max', 20000 )
    # pylint: disable=broad-except
    except Exception:
        warn( "*** Warning: setting resource limits. "
              "Mininet's performance may be affected.\n" )
    # pylint: enable=broad-except
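
rlimitTestAndSet is a Mininet helper that is not reproduced on this page; the test-and-raise pattern it implements (only ever raising a limit, never lowering it) can be sketched as follows:

from resource import getrlimit, setrlimit

def rlimitTestAndSet(name, limit):
    "Raise the given resource limit to `limit` if it is currently lower."
    soft, hard = getrlimit(name)
    if soft < limit:
        hardLimit = hard if limit < hard else limit
        setrlimit(name, (limit, hardLimit))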
Project: landscape-client    Author: CanonicalLtd    | project source | file source
def startService(self):
        Service.startService(self)
        bootstrap_list.bootstrap(data_path=self._config.data_path,
                                 log_dir=self._config.log_dir)
        if self._config.clones > 0:

            # Let clones open an appropriate number of fds
            setrlimit(RLIMIT_NOFILE, (self._config.clones * 100,
                                      self._config.clones * 200))

            # Increase the timeout of AMP's MethodCalls.
            # XXX: we should find a better way to expose this knob, and
            # not set it globally on the class
            from landscape.lib.amp import MethodCallSender
            MethodCallSender.timeout = 300

            # Create clones log and data directories
            for i in range(self._config.clones):
                suffix = "-clone-%d" % i
                bootstrap_list.bootstrap(
                    data_path=self._config.data_path + suffix,
                    log_dir=self._config.log_dir + suffix)

        result = succeed(None)
        result.addCallback(lambda _: self.watchdog.check_running())

        def start_if_not_running(running_daemons):
            if running_daemons:
                error("ERROR: The following daemons are already running: %s"
                      % (", ".join(x.program for x in running_daemons)))
                self.exit_code = 1
                reactor.crash()  # so stopService isn't called.
                return
            self._daemonize()
            info("Watchdog watching for daemons.")
            return self.watchdog.start()

        def die(failure):
            log_failure(failure, "Unknown error occurred!")
            self.exit_code = 2
            reactor.crash()
        result.addCallback(start_if_not_running)
        result.addErrback(die)
        return result