我们从Python开源项目中,提取了以下35个代码示例,用于说明如何使用atexit.unregister()。
def stop(self):
    """Halt the periodic commit timer (flushing one final commit) and
    detach the interpreter-exit cleanup hook for this instance."""
    if self.commit_timer is not None:
        self.commit_timer.stop()
        self.commit()
    if hasattr(self, '_cleanup_func'):
        # We are stopped now, so the atexit cleanup handler is obsolete.
        cleanup = self._cleanup_func
        if hasattr(atexit, 'unregister'):
            # py3 supports unregistering
            atexit.unregister(cleanup)  # pylint: disable=no-member
        else:
            # py2 requires removing from private attribute...
            try:
                atexit._exithandlers.remove(  # pylint: disable=no-member
                    (cleanup, (self,), {}))
            except ValueError:
                # Handler was already gone -- that is fine here.
                pass
        del self._cleanup_func
def _unregister_cleanup(self):
    """Drop the registered atexit cleanup callback, if any, and clear
    the reference so it cannot be unregistered twice."""
    cleanup = getattr(self, '_cleanup', None)
    if not cleanup:
        return
    if hasattr(atexit, 'unregister'):
        # py3 has first-class unregister support.
        atexit.unregister(cleanup)  # pylint: disable=no-member
    else:
        # py2: poke the private handler list instead.  A ValueError from
        # list.remove() just means the handler is already gone -- fine here.
        try:
            atexit._exithandlers.remove(  # pylint: disable=no-member
                (cleanup, (), {}))
        except ValueError:
            pass
    self._cleanup = None
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging
    import atexit

    logging._acquireLock()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging: move our
            # exit handler to the end of the list so it runs first (atexit
            # runs handlers LIFO).
            if not hasattr(atexit, 'unregister'):
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
            else:
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
    finally:
        logging._releaseLock()

    return _logger
def enable_colors(enable=True):
    """Allow/disallow colors on TTY output."""
    global _colors_enabled
    if enable:
        if not _colors_enabled:
            # Turning colors on: make sure they get reset at interpreter exit.
            _colors_enabled = True
            atexit.register(_atexit_disable_colors)
    elif _colors_enabled:
        # Turning colors off: the exit-time reset is no longer needed.
        _colors_enabled = False
        atexit.unregister(_atexit_disable_colors)
def stop(self) -> None:
    '''
    Flush queued data and stop the client.
    '''
    sender = self._sender_thread
    sender.ask_stop()
    sender.join()
    # stop() no longer needs to run at interpreter exit; unregistering the
    # bound method also lets the interpreter free this instance and its
    # memory.
    atexit.unregister(self.stop)
def register(func, *targs, **kargs):
    """register a function to be executed upon normal program termination

    func - function to be called at exit
    targs - optional arguments to pass to func
    kargs - optional keyword arguments to pass to func

    func is returned to facilitate usage as a decorator.
    """
    if not hasattr(atexit, "unregister"):
        # py2: atexit cannot unregister, so track handlers in our own list.
        _exithandlers.append((func, targs, kargs))
    else:
        atexit.register(func, *targs, **kargs)
    return func
def unregister(func):
    """remove func from the list of functions that are registered

    doesn't do anything if func is not found

    func = function to be unregistered
    """
    if not hasattr(atexit, "unregister"):
        # py2: scan our private list and drop every matching entry.
        for entry in [e for e in _exithandlers if e[0] == func]:
            _exithandlers.remove(entry)
    else:
        atexit.unregister(func)
def test_unregister(self):
    counter = [0]

    def inc():
        counter[0] += 1

    def dec():
        counter[0] -= 1

    # Register inc several times; a single unregister() call must remove
    # every copy, leaving only dec to run.
    for _ in range(4):
        atexit.register(inc)
    atexit.register(dec)
    atexit.unregister(inc)
    atexit._run_exitfuncs()
    self.assertEqual(counter[0], -1)
def test_bound_methods(self):
    collected = []
    atexit.register(collected.append, 5)
    atexit._run_exitfuncs()
    self.assertEqual(collected, [5])
    # A freshly-looked-up bound method compares equal to the registered
    # one, so unregister removes it and a second run appends nothing.
    atexit.unregister(collected.append)
    atexit._run_exitfuncs()
    self.assertEqual(collected, [5])
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    logging._acquireLock()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging.  Re-adding
            # our exit handler at the tail means it runs first at exit
            # (atexit handlers run LIFO).
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
    finally:
        logging._releaseLock()

    return _logger
def _stop_nowait(self):
    '''
    Stops the listener without waiting for the _stopped flag.

    Only called directly if there's an error while starting.
    '''
    with self._opslock:
        self._running = False
        # If we were running, kill the process so that the loop breaks free.
        worker = self._worker
        if worker is not None and worker.returncode is None:
            worker.terminate()
        # Normal-shutdown hook is pointless once we are stopped.
        atexit.unregister(self.stop)
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    logging._acquireLock()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0

            # XXX multiprocessing should cleanup before logging; unregister
            # then re-register so our handler ends up last in the list and
            # therefore runs first at interpreter exit.
            if not hasattr(atexit, 'unregister'):
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
            else:
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
    finally:
        logging._releaseLock()

    return _logger
def main(argv):
    # Entry point: parse CLI options, provision OVH Cloud instances, and
    # install DC/OS on them via the project helpers DCOSInstall/OVHInstances.
    p = argparse.ArgumentParser(description='Install DC/OS on OVH Cloud')
    p.add_argument('--url', help='URL to dcos_generate_config.sh',
                   default='https://downloads.dcos.io/dcos/EarlyAccess/dcos_generate_config.sh')
    p.add_argument('--project', help='OVH Cloud Project Name', required=True)
    p.add_argument('--flavor', help='OVH Cloud Machine Type (default hg-15)', default='hg-15')
    p.add_argument('--image', help='OVH Cloud OS Image (default Centos 7)', default='Centos 7')
    p.add_argument('--ssh-key', help='OVH Cloud SSH Key Name', required=True)
    p.add_argument('--security', help='Security mode (default permissive)', default='permissive')
    p.add_argument('--ssh-user', help='SSH Username (default centos)', default='centos')
    p.add_argument('--ssh-port', help='SSH Port (default 22)', default=22, type=int)
    p.add_argument('--region', help='OVH Cloud Region (default SBG1)', default='SBG1')
    p.add_argument('--name', help='OVH Cloud VM Instance Name(s)', default='Test')
    p.add_argument('--docker-size', help='Docker Disk Size in GiB (default 10G)', default=10, type=int)
    p.add_argument('--masters', help='Number of Master Instances (default 1)', default=1, type=int)
    p.add_argument('--agents', help='Number of Agent Instances (default 1)', default=1, type=int)
    p.add_argument('--pub-agents', help='Number of Public Agent Instances (default 0)', default=0, type=int)
    p.add_argument('--no-cleanup', help="Don't clean up Instances on EXIT",
                   dest='cleanup', action='store_false', default=True)
    p.add_argument('--no-error-cleanup', help="Don't clean up Instances on ERROR",
                   dest='errclnup', action='store_false', default=True)
    args = p.parse_args(argv)

    dcos = DCOSInstall(args, OVHInstances(args))
    dcos.deploy()

    if args.cleanup:
        # Interactive teardown: wait for the operator before destroying.
        input('Press Enter to DESTROY all instances...')
        if not args.errclnup:
            # No error-cleanup atexit hook exists, so tear down manually.
            # NOTE(review): when errclnup is True, cleanup presumably runs
            # via an atexit handler registered elsewhere -- confirm.
            dcos.oi.cleanup()
    else:
        if args.errclnup:
            # Keeping instances at exit: drop the error-cleanup atexit hook.
            atexit.unregister(dcos.oi.cleanup)
    sys.exit(0)
def stop_frame_grabber(self):
    """Kill the frame-grabber subprocess (if any) and drop the associated
    exit-time signal handler."""
    process = self.frame_grabber_process
    if process is None:
        return None
    process.kill()
    self.frame_grabber_process = None
    atexit.unregister(self._handle_signal)
def __del__(self):
    """
    Destroy the main engine.

    Flushes the entire circuit down the pipeline, clearing all temporary
    buffers (in, e.g., optimizers).
    """
    self.flush()
    try:
        # atexit.unregister is only available in Python 3; on Python 2 the
        # attribute lookup raises AttributeError, which we swallow.
        atexit.unregister(self._delfun)
    except AttributeError:
        pass
def worker_main(rank, n_parallel, master_rank, use_gpu, syncs):
    # Top-level loop of a worker process: set up, then repeatedly wait at
    # the barrier for a command ID from the master and dispatch on it.
    # The error handler is registered up-front so an abnormal death is
    # still reported/closed; it is unregistered on a clean quit.
    atexit.register(exct.worker_error_close)
    give_syncs(syncs)
    if use_gpu:
        exct.init_gpus(rank)
    scatterer.assign_rank(n_parallel, rank)
    connect_as_worker(n_parallel, rank, master_rank, use_gpu)
    while True:
        exct.sync.barrier_in.wait()
        if exct.sync.quit.value:
            # Orderly shutdown requested: the error-close hook must not fire.
            atexit.unregister(exct.worker_error_close)
            return  # (exit successfully)
        exct_ID = exct.sync.ID.value
        sub_ID = exct.sync.sub_ID.value
        if exct_ID == exct.DISTRIBUTE:
            # Receive the set of callables to execute in later FUNCTION steps.
            synk_fs = receive_distribution()
        elif exct_ID == exct.FUNCTION:
            synk_fs[sub_ID]()
        elif exct_ID == exct.GPU_COLL:
            worker_gpu_coll(sub_ID)
        elif exct_ID == exct.CPU_COLL:
            worker_cpu_coll(sub_ID, rank)
        elif exct_ID == exct.SYNK_COLL:
            worker_synk_coll(sub_ID)
        elif exct_ID == exct.DATA:
            manage_data(sub_ID, syncs.data)
        else:
            raise RuntimeError("Invalid worker exec ID: {}".format(exct_ID))
        exct.sync.barrier_out.wait()  # Prevent premature shmem overwriting.
def do_monitor(args): """ Handle "monitor" mode. """ # If we aren't running in monitor mode, then we are done. if not args.monitor: return # This is the main retry loop. while True: # Fork the process. logger.info('Forking child process.') pid = os.fork() # If we are the child, leave this function and work. if pid == 0: logger.debug('We are a newly spawned child process.') return logger.debug('Child process spawned: %d', pid) # Wait for the child to die. If we die first, kill the child. atexit.register(kill_process, pid) try: _, exit_status = os.waitpid(pid, 0) except KeyboardInterrupt: break atexit.unregister(kill_process) # Process the exit code. signal_number = exit_status & 0xFF exit_code = (exit_status >> 8) & 0xFF core_dump = bool(0x80 & signal_number) if signal_number == 0: logger.info('Child process exited with exit code: %d.', exit_code) else: logger.info('Child process exited with signal %d (core dump: %s).', signal_number, core_dump) retry = False if os.WIFSIGNALED(exit_status): if os.WTERMSIG(exit_status) == signal.SIGSEGV: logger.error('Child process seg faulted.') retry = True if not retry: break sys.exit(0) ###############################################################################
def stop(self, timeout=None):
    """
    Stop the producer (async mode). Blocks until async thread completes.
    """
    if timeout is not None:
        log.warning('timeout argument to stop() is deprecated - '
                    'it will be removed in future release')

    # FIX: `async` became a reserved keyword in Python 3.7, making the
    # original `self.async` a SyntaxError there.  Reading the attribute
    # through getattr() keeps the attribute name (and all callers)
    # unchanged while letting this module import on modern Python.
    is_async = getattr(self, 'async')

    if not is_async:
        log.warning('producer.stop() called, but producer is not async')
        return

    if self.stopped:
        log.warning('producer.stop() called, but producer is already stopped')
        return

    # We are async (guaranteed by the early return above), so signal the
    # background thread and wait for it to drain.
    self.queue.put((STOP_ASYNC_PRODUCER, None, None))
    self.thread_stop_event.set()
    self.thread.join()

    if hasattr(self, '_cleanup_func'):
        # Remove cleanup handler now that we've stopped.
        # py3 supports unregistering
        if hasattr(atexit, 'unregister'):
            atexit.unregister(self._cleanup_func)  # pylint: disable=no-member
        else:
            # py2 requires removing from private attribute...
            # ValueError on list.remove() if the exithandler no longer exists,
            # but that is fine here
            try:
                atexit._exithandlers.remove(  # pylint: disable=no-member
                    (self._cleanup_func, (self,), {}))
            except ValueError:
                pass
        del self._cleanup_func

    self.stopped = True