Python sys module: getcheckinterval() examples

The following 28 code examples, extracted from open-source Python projects, illustrate how to use sys.getcheckinterval().
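
Nearly every snippet below follows the same save/lower/restore pattern: read the current interval, shrink it so the interpreter switches threads much more often (which makes race conditions easier to reproduce), and put the old value back in a finally block. The sketch below is a minimal illustration of that pattern and is not taken from any of the listed projects; the helper name run_with_frequent_thread_switches is invented here. It also shows the version split that several entries handle explicitly: getcheckinterval()/setcheckinterval() count bytecode instructions and were deprecated in Python 3.2 (removed in Python 3.9), while their replacements getswitchinterval()/setswitchinterval() take a duration in seconds.

import sys

def run_with_frequent_thread_switches(func):
    """Call func() while thread switches happen as often as possible.

    Hypothetical helper, shown only to illustrate the pattern used by the
    examples on this page.
    """
    if hasattr(sys, 'getswitchinterval'):    # Python >= 3.2
        get_interval, set_interval = sys.getswitchinterval, sys.setswitchinterval
        fast_value = 1e-6   # switch roughly every microsecond
    else:                                    # Python 2
        get_interval, set_interval = sys.getcheckinterval, sys.setcheckinterval
        fast_value = 1      # check for a thread switch after every bytecode instruction
    old_interval = get_interval()
    set_interval(fast_value)
    try:
        return func()
    finally:
        set_interval(old_interval)

The zippy and zenchmarks entries below use essentially this get/set dispatch, while the Python 2-only test suites call setcheckinterval() directly.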

Project: zippy    Author: securesystemslab
def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        newgil = hasattr(sys, 'getswitchinterval')
        if newgil:
            geti, seti = sys.getswitchinterval, sys.setswitchinterval
        else:
            geti, seti = sys.getcheckinterval, sys.setcheckinterval
        old_interval = geti()
        try:
            for i in range(1, 100):
                seti(i * 0.0002 if newgil else i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            seti(old_interval)
Project: oil    Author: oilshell
def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getcheckinterval()
        try:
            for i in xrange(1, 100):
                # Try a couple times at each thread-switching interval
                # to get more interleavings.
                sys.setcheckinterval(i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setcheckinterval(old_interval)
Project: oil    Author: oilshell
def test_is_alive_after_fork(self):
        # Try hard to trigger #18418: is_alive() could sometimes be True on
        # threads that vanished after a fork.
        old_interval = sys.getcheckinterval()

        # Make the bug more likely to manifest.
        sys.setcheckinterval(10)

        try:
            for i in range(20):
                t = threading.Thread(target=lambda: None)
                t.start()
                pid = os.fork()
                if pid == 0:
                    os._exit(1 if t.is_alive() else 0)
                else:
                    t.join()
                    pid, status = os.waitpid(pid, 0)
                    self.assertEqual(0, status)
        finally:
            sys.setcheckinterval(old_interval)
Project: python2-tracer    Author: extremecoders-re
def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getcheckinterval()
        try:
            for i in xrange(1, 100):
                # Try a couple times at each thread-switching interval
                # to get more interleavings.
                sys.setcheckinterval(i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setcheckinterval(old_interval)
Project: python2-tracer    Author: extremecoders-re
def test_is_alive_after_fork(self):
        # Try hard to trigger #18418: is_alive() could sometimes be True on
        # threads that vanished after a fork.
        old_interval = sys.getcheckinterval()

        # Make the bug more likely to manifest.
        sys.setcheckinterval(10)

        try:
            for i in range(20):
                t = threading.Thread(target=lambda: None)
                t.start()
                pid = os.fork()
                if pid == 0:
                    os._exit(1 if t.is_alive() else 0)
                else:
                    t.join()
                    pid, status = os.waitpid(pid, 0)
                    self.assertEqual(0, status)
        finally:
            sys.setcheckinterval(old_interval)
Project: pefile.pypy    Author: cloudtracer
def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getcheckinterval()
        try:
            for i in xrange(1, 100):
                # Try a couple times at each thread-switching interval
                # to get more interleavings.
                sys.setcheckinterval(i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setcheckinterval(old_interval)
Project: pefile.pypy    Author: cloudtracer
def test_is_alive_after_fork(self):
        # Try hard to trigger #18418: is_alive() could sometimes be True on
        # threads that vanished after a fork.
        old_interval = sys.getcheckinterval()

        # Make the bug more likely to manifest.
        sys.setcheckinterval(10)

        try:
            for i in range(20):
                t = threading.Thread(target=lambda: None)
                t.start()
                pid = os.fork()
                if pid == 0:
                    os._exit(1 if t.is_alive() else 0)
                else:
                    t.join()
                    pid, status = os.waitpid(pid, 0)
                    self.assertEqual(0, status)
        finally:
            sys.setcheckinterval(old_interval)
Project: ndk-python    Author: gittor
def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getcheckinterval()
        try:
            for i in xrange(1, 100):
                # Try a couple times at each thread-switching interval
                # to get more interleavings.
                sys.setcheckinterval(i // 5)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setcheckinterval(old_interval)
Project: deb-python-concurrent.futures    Author: openstack
def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()
        def future_func():
            event.wait()
        oldswitchinterval = sys.getcheckinterval()
        sys.setcheckinterval(1)
        try:
            fs = set(self.executor.submit(future_func) for i in range(100))
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            sys.setcheckinterval(oldswitchinterval)
Project: hostapd-mana    Author: adde88
def setUpClass(self):
            self.checkInterval = sys.getcheckinterval()
            sys.setcheckinterval(7)
Project: zippy    Author: securesystemslab
def test_setcheckinterval(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(TypeError, sys.setcheckinterval)
            orig = sys.getcheckinterval()
            for n in 0, 100, 120, orig: # orig last to restore starting state
                sys.setcheckinterval(n)
                self.assertEqual(sys.getcheckinterval(), n)
Project: oil    Author: oilshell
def test_setcheckinterval(self):
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEqual(sys.getcheckinterval(), n)
Project: python2-tracer    Author: extremecoders-re
def test_setcheckinterval(self):
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEqual(sys.getcheckinterval(), n)
Project: sslstrip-hsts-openwrt    Author: adde88
def setUpClass(self):
            self.checkInterval = sys.getcheckinterval()
            sys.setcheckinterval(7)
Project: web_ctp    Author: molebot
def test_setcheckinterval(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(TypeError, sys.setcheckinterval)
            orig = sys.getcheckinterval()
            for n in 0, 100, 120, orig: # orig last to restore starting state
                sys.setcheckinterval(n)
                self.assertEqual(sys.getcheckinterval(), n)
Project: NZ-ORCID-Hub    Author: Royal-Society-of-New-Zealand
def get_py_internals():  # noqa: D103
    py_internals = []
    if hasattr(sys, 'builtin_module_names'):
        py_internals.append(('Built-in Modules', ', '.join(sys.builtin_module_names)))
    py_internals.append(('Byte Order', sys.byteorder + ' endian'))

    if hasattr(sys, 'getcheckinterval'):
        py_internals.append(('Check Interval', sys.getcheckinterval()))

    if hasattr(sys, 'getfilesystemencoding'):
        py_internals.append(('File System Encoding', sys.getfilesystemencoding()))

    max_integer_size = '%s (%s)' % (sys.maxsize, hex(sys.maxsize).upper())
    py_internals.append(('Maximum Integer Size', max_integer_size))

    if hasattr(sys, 'getrecursionlimit'):
        py_internals.append(('Maximum Recursion Depth', sys.getrecursionlimit()))

    if hasattr(sys, 'tracebacklimit'):
        traceback_limit = sys.tracebacklimit
    else:
        traceback_limit = 1000
    py_internals.append(('Maximum Traceback Limit', traceback_limit))

    py_internals.append(('Maximum Code Point', sys.maxunicode))
    return py_internals
Project: pefile.pypy    Author: cloudtracer
def test_setcheckinterval(self):
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEqual(sys.getcheckinterval(), n)
Project: ouroboros    Author: pybee
def test_setcheckinterval(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(TypeError, sys.setcheckinterval)
            orig = sys.getcheckinterval()
            for n in 0, 100, 120, orig: # orig last to restore starting state
                sys.setcheckinterval(n)
                self.assertEqual(sys.getcheckinterval(), n)
Project: ndk-python    Author: gittor
def test_setcheckinterval(self):
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEqual(sys.getcheckinterval(), n)
Project: zenchmarks    Author: squeaky-pl
def setUp(self):
        """
        Reduce the CPython check interval so that thread switches happen much
        more often, hopefully exercising more possible race conditions.  Also,
        delay actual test startup until the reactor has been started.
        """
        if _PY3:
            if getattr(sys, 'getswitchinterval', None) is not None:
                self.addCleanup(sys.setswitchinterval, sys.getswitchinterval())
                sys.setswitchinterval(0.0000001)
        else:
            if getattr(sys, 'getcheckinterval', None) is not None:
                self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
                sys.setcheckinterval(7)
Project: kbe_server    Author: xiaohaoppy
def test_setcheckinterval(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(TypeError, sys.setcheckinterval)
            orig = sys.getcheckinterval()
            for n in 0, 100, 120, orig: # orig last to restore starting state
                sys.setcheckinterval(n)
                self.assertEqual(sys.getcheckinterval(), n)
Project: iCompleteMe    Author: jerrymarino
def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()
        def future_func():
            event.wait()
        oldswitchinterval = sys.getcheckinterval()
        sys.setcheckinterval(1)
        try:
            fs = set(self.executor.submit(future_func) for i in range(100))
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            sys.setcheckinterval(oldswitchinterval)
Project: deb-python-cassandra-driver    Author: openstack
def test_thread_safety_during_modification(self):
        hosts = range(100)
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        errors = []

        def check_query_plan():
            try:
                for i in xrange(100):
                    list(policy.make_query_plan())
            except Exception as exc:
                errors.append(exc)

        def host_up():
            for i in xrange(1000):
                policy.on_up(randint(0, 99))

        def host_down():
            for i in xrange(1000):
                policy.on_down(randint(0, 99))

        threads = []
        for i in range(5):
            threads.append(Thread(target=check_query_plan))
            threads.append(Thread(target=host_up))
            threads.append(Thread(target=host_down))

        # make the GIL switch after every instruction, maximizing
        # the chance of race conditions
        check = six.PY2 or '__pypy__' in sys.builtin_module_names
        if check:
            original_interval = sys.getcheckinterval()
        else:
            original_interval = sys.getswitchinterval()

        try:
            if check:
                sys.setcheckinterval(0)
            else:
                sys.setswitchinterval(0.0001)
            # map() is lazy on Python 3, so start and join the threads with
            # explicit loops.
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        finally:
            if check:
                sys.setcheckinterval(original_interval)
            else:
                sys.setswitchinterval(original_interval)

        if errors:
            self.fail("Saw errors: %s" % (errors,))
Project: oil    Author: oilshell
def test_trashcan_threads(self):
        # Issue #13992: trashcan mechanism should be thread-safe
        NESTING = 60
        N_THREADS = 2

        def sleeper_gen():
            """A generator that releases the GIL when closed or dealloc'ed."""
            try:
                yield
            finally:
                time.sleep(0.000001)

        class C(list):
            # Appending to a list is atomic, which avoids the use of a lock.
            inits = []
            dels = []
            def __init__(self, alist):
                self[:] = alist
                C.inits.append(None)
            def __del__(self):
                # This __del__ is called by subtype_dealloc().
                C.dels.append(None)
                # `g` will release the GIL when garbage-collected.  This
                # helps assert subtype_dealloc's behaviour when threads
                # switch in the middle of it.
                g = sleeper_gen()
                next(g)
                # Now that __del__ is finished, subtype_dealloc will proceed
                # to call list_dealloc, which also uses the trashcan mechanism.

        def make_nested():
            """Create a sufficiently nested container object so that the
            trashcan mechanism is invoked when deallocating it."""
            x = C([])
            for i in range(NESTING):
                x = [C([x])]
            del x

        def run_thread():
            """Exercise make_nested() in a loop."""
            while not exit:
                make_nested()

        old_checkinterval = sys.getcheckinterval()
        sys.setcheckinterval(3)
        try:
            exit = []
            threads = []
            for i in range(N_THREADS):
                t = threading.Thread(target=run_thread)
                threads.append(t)
            with start_threads(threads, lambda: exit.append(1)):
                time.sleep(1.0)
        finally:
            sys.setcheckinterval(old_checkinterval)
        gc.collect()
        self.assertEqual(len(C.inits), len(C.dels))
Project: python2-tracer    Author: extremecoders-re
def test_trashcan_threads(self):
        # Issue #13992: trashcan mechanism should be thread-safe
        NESTING = 60
        N_THREADS = 2

        def sleeper_gen():
            """A generator that releases the GIL when closed or dealloc'ed."""
            try:
                yield
            finally:
                time.sleep(0.000001)

        class C(list):
            # Appending to a list is atomic, which avoids the use of a lock.
            inits = []
            dels = []
            def __init__(self, alist):
                self[:] = alist
                C.inits.append(None)
            def __del__(self):
                # This __del__ is called by subtype_dealloc().
                C.dels.append(None)
                # `g` will release the GIL when garbage-collected.  This
                # helps assert subtype_dealloc's behaviour when threads
                # switch in the middle of it.
                g = sleeper_gen()
                next(g)
                # Now that __del__ is finished, subtype_dealloc will proceed
                # to call list_dealloc, which also uses the trashcan mechanism.

        def make_nested():
            """Create a sufficiently nested container object so that the
            trashcan mechanism is invoked when deallocating it."""
            x = C([])
            for i in range(NESTING):
                x = [C([x])]
            del x

        def run_thread():
            """Exercise make_nested() in a loop."""
            while not exit:
                make_nested()

        old_checkinterval = sys.getcheckinterval()
        sys.setcheckinterval(3)
        try:
            exit = []
            threads = []
            for i in range(N_THREADS):
                t = threading.Thread(target=run_thread)
                threads.append(t)
            with start_threads(threads, lambda: exit.append(1)):
                time.sleep(1.0)
        finally:
            sys.setcheckinterval(old_checkinterval)
        gc.collect()
        self.assertEqual(len(C.inits), len(C.dels))
Project: pefile.pypy    Author: cloudtracer
def test_trashcan_threads(self):
        # Issue #13992: trashcan mechanism should be thread-safe
        NESTING = 60
        N_THREADS = 2

        def sleeper_gen():
            """A generator that releases the GIL when closed or dealloc'ed."""
            try:
                yield
            finally:
                time.sleep(0.000001)

        class C(list):
            # Appending to a list is atomic, which avoids the use of a lock.
            inits = []
            dels = []
            def __init__(self, alist):
                self[:] = alist
                C.inits.append(None)
            def __del__(self):
                # This __del__ is called by subtype_dealloc().
                C.dels.append(None)
                # `g` will release the GIL when garbage-collected.  This
                # helps assert subtype_dealloc's behaviour when threads
                # switch in the middle of it.
                g = sleeper_gen()
                next(g)
                # Now that __del__ is finished, subtype_dealloc will proceed
                # to call list_dealloc, which also uses the trashcan mechanism.

        def make_nested():
            """Create a sufficiently nested container object so that the
            trashcan mechanism is invoked when deallocating it."""
            x = C([])
            for i in range(NESTING):
                x = [C([x])]
            del x

        def run_thread():
            """Exercise make_nested() in a loop."""
            while not exit:
                make_nested()

        old_checkinterval = sys.getcheckinterval()
        sys.setcheckinterval(3)
        try:
            exit = False
            threads = []
            for i in range(N_THREADS):
                t = threading.Thread(target=run_thread)
                threads.append(t)
            for t in threads:
                t.start()
            time.sleep(1.0)
            exit = True
            for t in threads:
                t.join()
        finally:
            sys.setcheckinterval(old_checkinterval)
        gc.collect()
        self.assertEqual(len(C.inits), len(C.dels))
Project: ndk-python    Author: gittor
def test_trashcan_threads(self):
        # Issue #13992: trashcan mechanism should be thread-safe
        NESTING = 60
        N_THREADS = 2

        def sleeper_gen():
            """A generator that releases the GIL when closed or dealloc'ed."""
            try:
                yield
            finally:
                time.sleep(0.000001)

        class C(list):
            # Appending to a list is atomic, which avoids the use of a lock.
            inits = []
            dels = []
            def __init__(self, alist):
                self[:] = alist
                C.inits.append(None)
            def __del__(self):
                # This __del__ is called by subtype_dealloc().
                C.dels.append(None)
                # `g` will release the GIL when garbage-collected.  This
                # helps assert subtype_dealloc's behaviour when threads
                # switch in the middle of it.
                g = sleeper_gen()
                next(g)
                # Now that __del__ is finished, subtype_dealloc will proceed
                # to call list_dealloc, which also uses the trashcan mechanism.

        def make_nested():
            """Create a sufficiently nested container object so that the
            trashcan mechanism is invoked when deallocating it."""
            x = C([])
            for i in range(NESTING):
                x = [C([x])]
            del x

        def run_thread():
            """Exercise make_nested() in a loop."""
            while not exit:
                make_nested()

        old_checkinterval = sys.getcheckinterval()
        sys.setcheckinterval(3)
        try:
            exit = False
            threads = []
            for i in range(N_THREADS):
                t = threading.Thread(target=run_thread)
                threads.append(t)
            for t in threads:
                t.start()
            time.sleep(1.0)
            exit = True
            for t in threads:
                t.join()
        finally:
            sys.setcheckinterval(old_checkinterval)
        gc.collect()
        self.assertEqual(len(C.inits), len(C.dels))
Project: python-dse-driver    Author: datastax
def test_thread_safety_during_modification(self):
        hosts = range(100)
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)

        errors = []

        def check_query_plan():
            try:
                for i in xrange(100):
                    list(policy.make_query_plan())
            except Exception as exc:
                errors.append(exc)

        def host_up():
            for i in xrange(1000):
                policy.on_up(randint(0, 99))

        def host_down():
            for i in xrange(1000):
                policy.on_down(randint(0, 99))

        threads = []
        for i in range(5):
            threads.append(Thread(target=check_query_plan))
            threads.append(Thread(target=host_up))
            threads.append(Thread(target=host_down))

        # make the GIL switch after every instruction, maximizing
        # the chance of race conditions
        check = six.PY2 or '__pypy__' in sys.builtin_module_names
        if check:
            original_interval = sys.getcheckinterval()
        else:
            original_interval = sys.getswitchinterval()

        try:
            if check:
                sys.setcheckinterval(0)
            else:
                sys.setswitchinterval(0.0001)
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        finally:
            if check:
                sys.setcheckinterval(original_interval)
            else:
                sys.setswitchinterval(original_interval)

        if errors:
            self.fail("Saw errors: %s" % (errors,))