We extracted the following 23 code examples from open source Python projects to illustrate how to use celery.Task().
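For orientation before the examples: celery.Task is normally subclassed rather than instantiated directly, and the subclass is attached through the task decorator's base argument. A minimal sketch, assuming a local Redis broker (the app name, broker URL, and task body are illustrative, not taken from any example below):

import celery

app = celery.Celery('demo', broker='redis://localhost:6379/0')  # hypothetical broker URL

class LoggingTask(celery.Task):
    """Custom base class hooking into the task lifecycle."""

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        print('Task %s[%s] failed: %r' % (self.name, task_id, exc))
        super().on_failure(exc, task_id, args, kwargs, einfo)

@app.task(base=LoggingTask, bind=True)
def add(self, x, y):
    # self is the task instance because bind=True
    return x + y

Most of the examples below follow this pattern: they override lifecycle hooks such as __call__, apply_async, after_return, on_success, and on_failure on a Task subclass.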
def after_return(self, status, retval, task_id, args, kwargs, einfo):
    self.logger.debug('Task %s("%s") returned %s. Result: """%s"""', self.name, args, status, retval)
    meta = kwargs.get('meta', {})
    nolog = meta.get('nolog', False)

    # In case of emergency log this task
    if not nolog and not self.all_done:
        if isinstance(retval, dict):
            result = retval.copy()
        else:
            if einfo:
                result = {'detail': str(einfo.exception)}
            else:
                result = {'detail': str(retval)}

        if 'meta' not in result:
            result['meta'] = meta

        result['meta']['cb_name'] = LOGTASK
        meta['task_status'] = status
        meta['cleanup'] = True
        t = send_task_forever(task_id, LOGTASK, nolog=nolog, args=(result, task_id), kwargs=meta,
                              queue=Q_MGMT, expires=None, task_id=task_id_from_task_id(task_id))
        self.logger.warn('Created emergency log task %s', t.id)

def __call__(self, *args, **kwargs):
    try:
        _t_args = _walk_obj(args, self._maybe_transform_argument)
        _t_kwargs = _walk_obj(kwargs, self._maybe_transform_argument)

        results = super(Task, self).__call__(*_t_args, **_t_kwargs)

        if hasattr(self.request, 'girder_result_hooks'):
            if not isinstance(results, tuple):
                results = (results,)
            results = tuple([self._maybe_transform_result(i, r)
                             for i, r in enumerate(results)])

        return results
    finally:
        _walk_obj(args, self._maybe_cleanup)
        _walk_obj(kwargs, self._maybe_cleanup)

def test_patch_task(self):
    """
    When celery.Task is patched we patch the __init__, apply,
    apply_async, and run methods.
    """
    # Assert base class methods are patched
    self.assertIsInstance(celery.Task.__init__, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(celery.Task.apply, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(celery.Task.apply_async, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(celery.Task.run, wrapt.BoundFunctionWrapper)

    # Create an instance of a Task
    task = celery.Task()

    # Assert instance methods are patched
    self.assertIsInstance(task.__init__, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(task.apply, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(task.apply_async, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(task.run, wrapt.BoundFunctionWrapper)

def test_unpatch_task(self):
    """
    When unpatch_task is called on a patched task we unpatch the
    __init__, apply, apply_async, and run methods.
    """
    # Assert base class methods are patched
    self.assertIsInstance(celery.Task.__init__, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(celery.Task.apply, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(celery.Task.apply_async, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(celery.Task.run, wrapt.BoundFunctionWrapper)

    # Unpatch the base class
    unpatch_task(celery.Task)

    # Assert the methods are no longer wrapped
    self.assertFalse(isinstance(celery.Task.__init__, wrapt.BoundFunctionWrapper))
    self.assertFalse(isinstance(celery.Task.apply, wrapt.BoundFunctionWrapper))
    self.assertFalse(isinstance(celery.Task.apply_async, wrapt.BoundFunctionWrapper))
    self.assertFalse(isinstance(celery.Task.run, wrapt.BoundFunctionWrapper))

def on_failure(self, exc: Exception, task_id: str, args: list, kwargs: dict,
               einfo: ExceptionInfo):  # pragma: no cover
    # Note: task ids are UUID strings, so task_id is annotated as str.
    logger = get_logger('celery.worker')
    warning = ('Task {task}[{t_id}] had unexpected failure:\n'
               '\nargs: {args}\n\nkwargs: {kwargs}\n'
               '\n{einfo}').format(task=self.name, t_id=task_id, args=args,
                                   kwargs=kwargs, einfo=einfo)
    logger.warning(warning)

    super().on_failure(exc, task_id, args, kwargs, einfo)

def make_task(self):
    """Create a Celery task responsible for running reporter covers
    asynchronously.

    :returns: A Celery task.
    :rtype: :class:`~celery.Task`
    """
    class CallbackTask(Task):
        def on_success(task, retval, task_id, args, kwargs):
            schedule = self.backend.get_schedule(args[0])
            self.on_cover_success(schedule, retval)

        def on_failure(task, exc, task_id, args, kwargs, einfo):
            schedule = self.backend.get_schedule(args[0])
            self.on_cover_failure(schedule, exc)

    # make `run_cover` method into a celery task
    run_cover = self._make_run_cover()
    return self.celery.task(bind=True, base=CallbackTask)(run_cover)

def after_return(self, status, retval, task_id, args, kwargs, einfo): """ Handler called after the task returns. :param status: Current task state. :param retval: Task return value/exception. :param task_id: Unique id of the task. :param args: Original arguments for the task that returned. :param kwargs: Original keyword arguments for the task that returned. :param einfo: ExceptionInfo instance, containing the traceback (if any). :return: None """ logger.debug( "In %s.after_return: %s, %s, %s, %s." % (self.__class__.__name__, status, retval, task_id, einfo) ) self.__decrement_request_tags() super(WebSightBaseTask, self).after_return(status, retval, task_id, args, kwargs, einfo)
def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
                link=None, link_error=None, shadow=None, **options):
    # Pass girder related job information through to the signals by
    # adding this information to options['headers'].

    # This sets defaults for reserved_options based on the class defaults,
    # or values defined by the girder_job() decorator
    headers = {
        'girder_job_title': self._girder_job_title,
        'girder_job_type': self._girder_job_type,
        'girder_job_public': self._girder_job_public,
        'girder_job_handler': self._girder_job_handler,
        'girder_job_other_fields': self._girder_job_other_fields,
    }

    # Certain keys may show up in either kwargs (e.g. via
    # .delay(girder_token='foo')) or in options (e.g.
    # .apply_async(args=(), kwargs={}, girder_token='foo')). For
    # those special headers, pop them out of kwargs or options and
    # put them in headers so they can be picked up by the
    # before_task_publish signal.
    for key in self.reserved_headers + self.reserved_options:
        if kwargs is not None and key in kwargs:
            headers[key] = kwargs.pop(key)
        if key in options:
            headers[key] = options.pop(key)

    if 'headers' in options:
        options['headers'].update(headers)
    else:
        options['headers'] = headers

    return super(Task, self).apply_async(
        args=args, kwargs=kwargs, task_id=task_id, producer=producer,
        link=link, link_error=link_error, shadow=shadow, **options)

def is_revoked(task):
    """
    Utility function to check whether a task has been revoked.

    :param task: The task.
    :type task: celery.app.task.Task
    :return: True if this task is in the revoked list for this worker,
        False otherwise.
    """
    return task.request.id in _revoked_tasks(task)

def task_id_sent_handler(sender=None, result=None, **kwargs):
    # Information about the task is located in the headers for task
    # messages using task protocol version 2.
    print(type(result))
    print(result)
    print('\n ***Task Success***')
    url = 'http://localhost:8000'
    requests.post(url)


# @task_revoked.connect
# def my_task_revoked_handler(sender=None, body=None, *args, **kwargs):
#     print(kwargs['request'].args)

def setUp(self):
    self.broker_url = 'redis://127.0.0.1:{port}/0'.format(port=REDIS_CONFIG['port'])
    self.tracer = get_dummy_tracer()
    self.pin = Pin(service='celery-test', tracer=self.tracer)
    patch_app(celery.Celery, pin=self.pin)
    patch_task(celery.Task, pin=self.pin)

def tearDown(self):
    unpatch_app(celery.Celery)
    unpatch_task(celery.Task)

def test_task_init(self):
    """
    Creating an instance of a patched celery.Task will yield a
    patched instance.
    """
    task = celery.Task()

    # Assert instance methods are patched
    self.assertIsInstance(task.__init__, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(task.apply, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(task.apply_async, wrapt.BoundFunctionWrapper)
    self.assertIsInstance(task.run, wrapt.BoundFunctionWrapper)

def __call__(self, cmd, *args, **kwargs):
    self.logger = get_task_logger('que.tasks')
    self.all_done = False
    task = 'Task %s("%s")' % (self.name, cmd)
    lock = kwargs.pop('lock', False)
    block = kwargs.pop('block', None)
    check_user_tasks = kwargs.pop('check_user_tasks', False)
    tid = self.request.id
    blocked = False

    if lock:
        task_lock = TaskLock(lock, desc=task, logger=self.logger)
    else:
        task_lock = NoLock()

    try:
        if check_user_tasks:
            # Wait for task to appear in UserTasks - bug #chili-618
            # Will raise an exception in case the task does not show up
            UserTasks.check(tid, logger=self.logger)

        task_lock.task_check()  # Will raise an exception in case the lock does not exist

        if block and redis.exists(block):
            blocked = True
            self.retry(exc=TaskRetry(None))  # Will raise special exception

        return super(MetaTask, self).__call__(cmd, *args, **kwargs)  # run()
    finally:
        if not blocked:
            # Lock must _not_ be deleted when failing on retry
            task_lock.delete()

def test_run(django_scheduler, django_schedule):
    django_scheduler.set_task()
    assert isinstance(django_scheduler.celery_task, celery.Task)

def after_return(self, *args, **kwargs):
    logger.debug('Task returned: {0!r}'.format(self.request))

def send_email(self: celery.Task, to_name: str, to_addr: str,
               subject: str, text: str, html: str):
    """Send an email to a single address."""
    # WARNING: when changing the signature of this function, also change the
    # self.retry() call below.
    cfg = current_app.config

    # Construct the message
    msg = EmailMessage()
    msg['Subject'] = subject
    msg['From'] = Address(cfg['MAIL_DEFAULT_FROM_NAME'], addr_spec=cfg['MAIL_DEFAULT_FROM_ADDR'])
    msg['To'] = (Address(to_name, addr_spec=to_addr),)
    msg.set_content(text)
    msg.add_alternative(html, subtype='html')

    # Refuse to send mail when we're testing.
    if cfg['TESTING']:
        log.warning('not sending mail to %s <%s> because we are TESTING', to_name, to_addr)
        return
    log.info('sending email to %s <%s>', to_name, to_addr)

    # Send the message via local SMTP server.
    try:
        with smtplib.SMTP(cfg['SMTP_HOST'], cfg['SMTP_PORT'], timeout=cfg['SMTP_TIMEOUT']) as smtp:
            if cfg.get('SMTP_USERNAME') and cfg.get('SMTP_PASSWORD'):
                smtp.login(cfg['SMTP_USERNAME'], cfg['SMTP_PASSWORD'])
            smtp.send_message(msg)
    except (IOError, OSError) as ex:
        log.exception('error sending email to %s <%s>, will retry later: %s',
                      to_name, to_addr, ex)
        self.retry((to_name, to_addr, subject, text, html), countdown=cfg['MAIL_RETRY'])
    else:
        log.info('mail to %s <%s> successfully sent', to_name, to_addr)

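The decorator that registers send_email is not shown in the snippet above; assuming it is registered as a bound task (e.g. @celery_app.task(bind=True), where celery_app is hypothetical), a caller would queue it roughly like this:

# Hypothetical call site; assumes send_email is registered as a bound
# Celery task (the decorator is not shown in the example above).
send_email.delay(
    to_name='Jane Doe',
    to_addr='jane@example.com',
    subject='Welcome',
    text='Hello, Jane!',
    html='<p>Hello, Jane!</p>',
)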
def _config_celery(self):
    """Disables Celery by entirely mocking it.

    Without this, actual Celery tasks will be created while the tests
    are running.
    """
    from celery import Celery, Task

    self.celery = unittest.mock.MagicMock(Celery)

    def fake_task(*task_args, bind=False, **task_kwargs):
        def decorator(f):
            def delay(*args, **kwargs):
                if bind:
                    return f(decorator.sender, *args, **kwargs)
                else:
                    return f(*args, **kwargs)

            f.delay = delay
            f.si = unittest.mock.MagicMock()
            f.s = unittest.mock.MagicMock()
            return f

        if bind:
            decorator.sender = unittest.mock.MagicMock(Task)
        return decorator

    self.celery.task = fake_task

def apply_async(self, *args, **kwargs):
    """
    Override the default Celery Task apply_async to allow for the passing
    of tags to tasks.
    :param args: Positional arguments.
    :param kwargs: Keyword arguments.
    :return: The results of calling super.apply_async.
    """
    tags = kwargs.get("tags", [])
    headers = kwargs.get("headers", {})
    retries = kwargs.get("retries", 0)
    task_args, task_kwargs = args
    tags.extend(self.__get_tags_from_task_kwargs(task_kwargs))
    try:
        del kwargs["chord"]["options"]["producer"]
    except (TypeError, KeyError):
        pass
    if tags is not None and not isinstance(tags, list):
        raise ValueError(
            "Got an unexpected value for the tags keyword argument to apply_async: %s."
            % (tags,)
        )
    if len(tags) > 0:
        if retries > 0:
            logger.debug(
                "Not incrementing tags %s as apply_async resulted from retry."
                % (tags,)
            )
        else:
            self.__increment_tags(tags)
        headers["tags"] = tags
    kwargs["headers"] = headers
    return super(WebSightBaseTask, self).apply_async(*args, **kwargs)

def start_time(self):
    """
    Get the time at which this task started. Note that this relies on the
    task_prerun_handler signal hook in app.py.
    :return: the time at which this task started.
    """
    if self._start_time is None:
        logger.warning(
            "Start time not set! Task was %s (ID %s)."
            % (self.name, self.request.id)
        )
    return self._start_time

def after_return(self, status, retval, task_id, args, kwargs, einfo): """ Ensures that any database session opened by the Task is closed. :param status: Current task state. :param retval: Task return value/exception. :param task_id: Unique id of the task. :param args: Original arguments for the task that returned. :param kwargs: Original keyword arguments for the task that returned. :param einfo: ExceptionInfo instance, containing the traceback (if any). :return: None """ super(DatabaseTask, self).after_return(status, retval, task_id, args, kwargs, einfo)
def on_failure(self, exc, task_id, args, kwargs, einfo):
    extra = {
        'task name': self.name,
        'task id': task_id,
        'task args': args,
        'task kwargs': kwargs,
    }
    _logger.error("Celery Task Failed", exc_info=einfo, extra=extra)

def make_celery(app=None):
    if app is None:
        app = create_app('kubedock', os.path.dirname(__file__))

    if SENTRY_ENABLE:
        import socket
        import celery
        import raven
        from raven.contrib.celery import register_signal
        from raven.contrib.celery import register_logger_signal
        from kubedock.settings import MASTER_IP
        from kubedock.settings import SENTRY_DSN, SENTRY_EXCLUDE_PATHS
        from kubedock.settings import SENTRY_PROCESSORS
        from kubedock.utils import get_version
        from kubedock.kapi.licensing import get_license_info
        authkey = get_license_info().get('auth_key', 'no installation id')
        from celery.utils import log

        class Celery(celery.Celery):
            def on_configure(self):
                hostname = "{}({})".format(socket.gethostname(), MASTER_IP)
                tags = {'installation_id': authkey}
                client = raven.Client(SENTRY_DSN, name=hostname,
                                      release=get_version('kuberdock'),
                                      tags=tags,
                                      processors=SENTRY_PROCESSORS,
                                      exclude_paths=SENTRY_EXCLUDE_PATHS)
                # register a custom filter to filter out duplicate logs
                register_logger_signal(client)
                # hook into the Celery error handler
                register_signal(client)
    else:
        from celery import Celery

    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True
        flask_app = app

        def __call__(self, *args, **kwargs):
            with app.app_context():
                env.user = 'root'
                env.key_filename = SSH_KEY_FILENAME
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
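The last example follows the standard Flask–Celery factory pattern: ContextTask pushes an application context around every task invocation so Flask extensions behave as they do during a request. A minimal usage sketch of the factory above (the task itself is hypothetical):

# Hypothetical usage of the factory above.
celery = make_celery()  # builds the Flask app itself when none is passed

@celery.task
def ping():
    # Executes inside app.app_context(), so Flask extensions (config,
    # database sessions, etc.) behave as they do during a request.
    return 'pong'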