Python celery module: group() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use celery.group().
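
Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow (the broker/backend URLs and the add task are illustrative, not from any project below): a group wraps a set of task signatures, runs them in parallel, and lets you collect the results.

from celery import Celery, group

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

def run_parallel():
    # group() builds one signature that runs every member in parallel.
    job = group(add.s(i, i) for i in range(10))
    result = job.apply_async()
    # join() blocks until all members finish and returns their results.
    return result.join()  # [0, 2, 4, ..., 18]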

Project: Software-Architecture-with-Python    Author: PacktPublishing    | Project source | File source
def mandelbrot_main(w, h, max_iterations=1000, output='mandelbrot_celery.png'):
    """ Main function for mandelbrot program with celery """

    job = group([mandelbrot_calc_row.s(y, w, h, max_iterations) for y in range(h)])
    result = job.apply_async()

    image = Image.new('RGB', (w, h))

    for image_rows in result.join():
        for k, v in image_rows.items():
            k = int(k)
            v = tuple(map(int, v))
            # Map the flat pixel index back to (x, y) using the width argument.
            x, y = k % w, k // w
            image.putpixel((x, y), v)

    image.save(output, 'PNG')
    print('Saved to', output)
Project: falsy    Author: pingf    | Project source | File source
def loads(payload):
    if payload.get('type') != 'normal':
        raise Exception('celery task loader only supports normal mode')
    tasks = payload.get('tasks', [])
    cts = []
    for task in tasks:
        ops = [
            load(id, task.get('args'), task.get('on_error')) if i == 0
            else load(id, None, task.get('on_error'))
            for i, id in enumerate(task['ids'])
        ]
        cts.append(chain(ops))
    callback = payload.get('callback')
    if callback:
        return chord(header=group(cts), body=func.load(callback).s())
    return group(cts)
Project: CA-NEAT    Author: mathiasose    | Project source | File source
def initialize_generation(db_path: str, scenario_id: int, generation: int, genotypes: List[Genome],
                          pair_selection_f: PAIR_SELECTION_F_T, fitness_f: FITNESS_F_T, neat_config: CPPNNEATConfig,
                          ca_config: CAConfig) -> None:
    from celery import group, chord

    grouped_tasks = group(handle_individual.s(
        scenario_id=scenario_id,
        generation=generation,
        individual_number=i,
        genotype=genotype,
        fitness_f=fitness_f,
        ca_config=ca_config,
    ) for i, genotype in enumerate(genotypes))

    final_task = persist_results.subtask(
        args=(db_path, scenario_id, generation, fitness_f, pair_selection_f, neat_config, ca_config),
    )

    chord(grouped_tasks, final_task)()
Project: celery-deadline    Author: chadrik    | Project source | File source
def __init__(self, headers, job_info):
        self.values = {
            'group_id': headers['group'] or '',
            'root_id': headers['root_id'],
            'task_id': headers['id'],
            'task_name': headers['task'],
            'task_args': headers['argsrepr'],
            'task_kwargs': headers['kwargsrepr'],
            'app_name': headers['task'].rsplit('.', 1)[0],
            'plugin': job_info['Plugin'],
        }

        # set defaults
        if self.values['group_id']:
            job_info.setdefault('BatchName', '{plugin}-{group_id}')
        job_info.setdefault('Name', '{plugin}-{task_name}-{task_id}')
Project: celery-deadline    Author: chadrik    | Project source | File source
def job(plugin_name, frames, job_info=None, plugin_info=None):
    """
    Create a group of tasks for executing each of the frame packets for
    the given Deadline plugin.

    Parameters
    ----------
    plugin_name : str
    frames : str
    job_info : dict, optional
    plugin_info : dict, optional

    Returns
    -------
    celery.group
    """
    deadline_group_id = ObjectId()
    return group(
        [plugin_task.signature((plugin_name, frames, frame, i),
                               plugin_info=plugin_info,
                               job_info=job_info,
                               deadline_group_id=deadline_group_id)
         for i, frame in enumerate(parse_frames(frames))])

# --

# FIXME: unused:
Project: capillary    Author: celery-capillary    | Project source | File source
def list_to_set_reducer(self, groups):
    """Flatten nested lists to a set of items

    Expected shape of input, List of group results, each item in
    group results is list of items to reduce.
    [[[item1, item2], [item2, item3]], [[item4, item5]]]

    :param groups: List of group results. Each group result is expected to be
    an itterable containing itterables of set members.

    :returns: List of unique values from the input
    :rtype: list
    """
    # TODO does this assume too much knowledge of the shape of the input?
    # print 'list_to_set_reducer: {}'.format(groups)
    s = set()
    for g in groups:
        for i in g:
            s.update(i)

    return list(s)
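
A quick sanity check of the reducer above (self is unused by the body, so None can stand in for it):

groups = [[[1, 2], [2, 3]], [[4, 5]]]
assert sorted(list_to_set_reducer(None, groups)) == [1, 2, 3, 4, 5]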
Project: capillary    Author: celery-capillary    | Project source | File source
def get_end_task(self, tasks, required_kwargs):
        """Accepts any number of tasks as returned by _get_pipeline.

        :param tasks: dictionary of str:info where str is the name of the task, info is from the registry
        :param dict required_kwargs: Keyword arguments that some tasks require

        :returns: celery.Signature, or celery.group, or None
        """

        sigs = [
            self.make_signature(info, required_kwargs)
            for name, info in tasks.items()
            if info['after'] is ALL
        ]

        if not sigs:
            return None

        return sigs[0] if len(sigs) == 1 else group(sigs)
Project: django-user-tasks    Author: edx    | Project source | File source
def test_create_chord_exclude_body(self):
        """If the body task of a chord is not a UserTask, it should be cleanly omitted from the status."""
        chord([
            sample_task.s(self.user.id, '1', user_task_name='Chord: 1 & 2'),
            sample_task.s(self.user.id, '2', user_task_name='I should be ignored')
        ])(normal_task.s('3'))
        assert UserTaskStatus.objects.count() == 4
        chord_status = UserTaskStatus.objects.get(task_class='celery.chord')
        assert chord_status.task_id
        assert chord_status.parent is None
        assert chord_status.is_container
        assert chord_status.name == 'Chord: 1 & 2'
        assert chord_status.total_steps == 2
        verify_state(chord_status, False)

        group_status = UserTaskStatus.objects.get(task_class='celery.group')
        assert group_status.task_id
        assert group_status.parent_id == chord_status.id
        assert group_status.is_container
        assert group_status.name == 'Chord: 1 & 2'
        assert group_status.total_steps == 2
        verify_state(group_status, False)

        header_tasks = UserTaskStatus.objects.filter(parent=group_status)
        assert len(header_tasks) == 2
        for status in header_tasks:
            assert status.task_id
            assert status.parent_id == group_status.id
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2']
            assert status.total_steps == 1
            verify_state(status, False)
Project: django-user-tasks    Author: edx    | Project source | File source
def _create_group(self, eager):
        """Create a celery group and verify some assertions about the corresponding status records"""
        result = group(sample_task.s(self.user.id, '1'),
                       sample_task.s(self.user.id, '2', user_task_name='Group: 1, 2')).delay()
        assert UserTaskStatus.objects.count() == 3
        group_status = UserTaskStatus.objects.get(task_class='celery.group')
        assert group_status.task_id == result.id
        assert group_status.parent is None
        assert group_status.is_container
        assert group_status.name == 'Group: 1, 2'
        assert group_status.total_steps == 2
        verify_state(group_status, eager)

        assert len(result.children) == 2
        for result in result.children:
            task_id = result.id
            status = UserTaskStatus.objects.get(task_id=task_id)
            assert status.parent_id == group_status.id
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2']
            assert status.total_steps == 1
            verify_state(status, eager)
Project: thorn    Author: robinhood    | Project source | File source
def as_request_group(self, requests):
        return group(
            dispatch_requests.s([req.as_dict() for req in chunk])
            for chunk in self.group_requests(requests)
        )
Project: thorn    Author: robinhood    | Project source | File source
def send(self, event, payload, sender,
             timeout=None, context=None, **kwargs):
        # the requests are sorted by url, so we group them into chunks
        # each containing a list of requests for that host/port/scheme pair,
        # with up to :setting:`THORN_CHUNKSIZE` requests each.
        #
        # this way requests have a good chance of reusing keepalive
        # connections as requests with the same host are grouped together.
        return self.as_request_group(self.prepare_requests(
            event, payload, sender, timeout, context, **kwargs)).delay()
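
The chunking the comment describes can be sketched independently of Thorn's internals; group_requests below is a stand-in for the real method (which relies on the requests arriving sorted by URL), and CHUNKSIZE plays the role of the THORN_CHUNKSIZE setting:

from itertools import islice

CHUNKSIZE = 10  # stand-in for the THORN_CHUNKSIZE setting

def group_requests(requests):
    # Consecutive requests share a host/port/scheme, so each chunk has a
    # good chance of reusing keepalive connections.
    it = iter(requests)
    while True:
        chunk = list(islice(it, CHUNKSIZE))
        if not chunk:
            break
        yield chunk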
Project: micromasters    Author: mitodl    | Project source | File source
def batch_update_user_data():
    """
    Create sub tasks to update user data like enrollments,
    certificates and grades from edX platform.
    """
    expiration = now_in_utc() + timedelta(hours=5)
    lock = Lock(LOCK_ID, expiration)
    if not lock.acquire():
        # Lock should have expired by now
        log.error("Unable to acquire lock for batch_update_user_data")
        return

    users_to_refresh = calculate_users_to_refresh_in_bulk()

    jobs = release_batch_update_user_data_lock.s(token=lock.token.decode())
    try:
        if len(users_to_refresh) > 0:
            user_id_chunks = chunks(users_to_refresh)

            job = group(
                batch_update_user_data_subtasks.s(user_id_chunk, expiration.timestamp())
                for user_id_chunk in user_id_chunks
            )
            jobs = job | jobs
    finally:
        jobs.delay()
Project: covercache_public    Author: DarienLibrary    | Project source | File source
def get_recommendations(self):
        from . import tasks
        identifiers = Identifier.objects.filter(
            source='isbn',
            manifestations__in=self.manifestations.all())
        grouped_identifier_ids = group(
            tasks.get_recommendations.s(i.pk)
            for i in identifiers)().get()
        result_identifier_ids = itertools.chain.from_iterable(grouped_identifier_ids)
        works = Work.objects.filter(pk__in=result_identifier_ids)
        return works
Project: toptal-blog-celery-toy-ex    Author: Rustem    | Project source | File source
def produce_hot_repo_report(period, ref_date=None):
    # 1. parse date
    ref_date_str = strf_date(period, ref_date=ref_date)

    # 2. fetch and join
    fetch_jobs = group([
        fetch_hot_repos.s(ref_date_str, 100, 1),
        fetch_hot_repos.s(ref_date_str, 100, 2),
        fetch_hot_repos.s(ref_date_str, 100, 3),
        fetch_hot_repos.s(ref_date_str, 100, 4),
        fetch_hot_repos.s(ref_date_str, 100, 5)
    ])
    # 3. group by language and
    # 4. create csv
    return chord(fetch_jobs)(build_report_task.s(ref_date_str)).get()
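
For reference, the chord(header)(body) call above has an equivalent chained spelling, since celery upgrades a group chained into a callback signature to a chord; a sketch reusing fetch_jobs and ref_date_str from the function body:

# Equivalent to chord(fetch_jobs)(build_report_task.s(ref_date_str)).get()
report_csv = (fetch_jobs | build_report_task.s(ref_date_str)).delay().get()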
Project: toptal-blog-celery-toy-ex    Author: Rustem    | Project source | File source
def build_report_task(results, ref_date):
    all_repos = []
    for repos in results:
        all_repos += [Repository(repo) for repo in repos]

    # 3. group by language
    grouped_repos = {}
    for repo in all_repos:
        if repo.language in grouped_repos:
            grouped_repos[repo.language].append(repo.name)
        else:
            grouped_repos[repo.language] = [repo.name]

    # 4. create csv
    lines = []
    for lang in sorted(grouped_repos.keys()):
        lines.append([lang] + grouped_repos[lang])

    filename = '{media}/github-hot-repos-{date}.csv'.format(media=settings.MEDIA_ROOT, date=ref_date)
    return make_csv(filename, lines)
Project: nzhuts    Author: jordij    | Project source | File source
def fetch_all_huts():
    headers = {'x-api-key': settings.API_KEY}
    subtasks = []
    try:
        r = requests.get(settings.API_HUTS_BASE_URL, headers=headers, timeout=settings.API_TIMEOUT)
    except requests.exceptions.RequestException as e:
        logger.exception(str(e))
        return
    if r.status_code == 200:
        for h in r.json():
            subtasks.append(fetch_hut.s(h['assetId']))
    else:
        logger.error("Failed huts request with status %s, %s", str(r.status_code), r.json()['message'])
    results = group(subtasks)()  # in parallel
    results.get()
Project: munch-core    Author: crunchmail    | Project source | File source
def start_sending(self):
        from .tasks import send_mail

        with transaction.atomic():
            # Then, handle all ignored emails (optouts)
            now = timezone.now()
            for i in self.mails.not_legit_for(self):
                MailStatus.objects.create(
                    mail=i, status=MailStatus.IGNORED,
                    creation_date=timezone.now(),
                    raw_msg='Ignored because of previous optout {}'.format(
                        i.get_related_optout()))

            # Then, handle the legit ones
            if not self.send_date:
                self.send_date = timezone.now()
                self.save()
            self.notify(Message.SENDING)
            # locking ?
            legit_mails = self.mails.legit_for(self)
            now = timezone.now()
            MailStatus.objects.bulk_create([
                MailStatus(
                    mail=i, status=MailStatus.QUEUED,
                    creation_date=now, raw_msg='Enqueued in celery')
                for i in legit_mails])
            # bulk_create do not update Mail (too high db cost)
            legit_mails.update(
                curstatus=MailStatus.QUEUED, latest_status_date=now)
            # end_locking ?
            tasks = celery.group([send_mail.s(m.pk) for m in legit_mails])
            log.info('Starting sending {} (#{}) to {} recipients.'.format(
                self, self.pk, len(tasks)))

        tasks.apply_async()
Project: cyanide    Author: celery    | Project source | File source
def manyshort(self):
        self.join(group(add.s(i, i) for i in range(1000))(),
                  timeout=10, propagate=True)
Project: cyanide    Author: celery    | Project source | File source
def always_timeout(self):
        self.join(
            group(sleeping.s(1).set(time_limit=0.1)
                  for _ in range(100))(),
            timeout=10, propagate=False,
        )
Project: cyanide    Author: celery    | Project source | File source
def alwayskilled(self):
        g = group(kill.s() for _ in range(10))
        self.join(g(), timeout=10)
Project: cyanide    Author: celery    | Project source | File source
def alwaysexits(self):
        g = group(exiting.s() for _ in range(10))
        self.join(g(), timeout=10)
Project: cyanide    Author: celery    | Project source | File source
def _evil_groupmember(self, evil_t, *eargs, **opts):
        g1 = group(add.s(2, 2).set(**opts), evil_t.s(*eargs).set(**opts),
                   add.s(4, 4).set(**opts), add.s(8, 8).set(**opts))
        g2 = group(add.s(3, 3).set(**opts), add.s(5, 5).set(**opts),
                   evil_t.s(*eargs).set(**opts), add.s(7, 7).set(**opts))
        self.join(g1(), timeout=10)
        self.join(g2(), timeout=10)
Project: cyanide    Author: celery    | Project source | File source
def _revoketerm(self, wait=None, terminate=True,
                    joindelay=True, data=BIG):
        g = group(any_.s(data, sleep=wait) for i in range(8))
        r = g()
        if terminate:
            if joindelay:
                sleep(random.choice(range(4)))
            r.revoke(terminate=True)
        self.join(r, timeout=10)
Project: rosie-ci    Author: adafruit    | Project source | File source
def test_commit(repo, ref, tag):
    chain = (start_test.s(repo, ref)
             | group(test_board.s(ref=ref, repo=repo, tag=tag, board=board)
                     for board in config["devices"])
             | finish_test.s(repo, ref))
    chain.delay()

# Adapted from: https://gist.github.com/andrewgross/8ba32af80ecccb894b82774782e7dcd4
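
The pipeline above leans on a documented celery behavior: a group in the middle of a chain is upgraded to a chord, so finish_test only runs after every test_board task completes. The same shape, with illustrative tasks:

from celery import Celery, chain, group

app = Celery('ci_demo', broker='memory://')

@app.task
def start():
    return 'ready'

@app.task
def test_one(start_result, n):
    # Each group member receives start()'s return value as its first argument.
    return n * n

@app.task
def finish(results):
    # Runs once, with the list of results from every test_one task.
    return sum(results)

pipeline = chain(start.s(), group(test_one.s(n) for n in range(4)), finish.s())
# pipeline.delay() runs start, fans out four test_one tasks, then finish.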
Project: skp_edu_docker    Author: TensorMSA    | Project source | File source
def train_networks(self, networks):
        """
        train each networks on cluster server
        :param networks: network lists
        :return: networks
        """
        try:
            tasks = []
            if self.debug_mode:
                # For debugging you can run all tasks in the django process.
                for network in networks:
                    if network['flag']:
                        continue
                    result = train(network.get('nn_id'), str(network.get('nn_wf_ver_id')))
                    key = '_'.join([network['nn_id'], str(network['nn_wf_ver_id'])])
                    network['acc'] = result[key].get('accuracy')
                    network['flag'] = True
            else:
                # You can use cluster servers with celery for faster
                # hyper parameter searching with the genetic algorithm.
                for network in networks:
                    if network['flag']:
                        continue
                    tasks.append(train.subtask((network.get('nn_id'), str(network.get('nn_wf_ver_id')))))
                results = group(tasks).apply_async()
                results = results.join()
                for result in results:
                    for network in networks:
                        key = '_'.join([network['nn_id'], str(network['nn_wf_ver_id'])])
                        if key in result and result[key] is not None and result[key].get('accuracy') is not None:
                            network['acc'] = result[key].get('accuracy')
                            network['flag'] = True
            return networks
        except Exception as e:
            logging.error("Error on training: {0}".format(e))
Project: celery-deadline    Author: chadrik    | Project source | File source
def test():
    # print add.delay(2, 2)

    from celery import group, chain

    job_info = {
        'Name': '{task_name}{task_args}',
        'BatchName': 'celery-{root_id}'
    }

    job = group([add.s(2, 2), add.s(4, 4)])
    result = job.apply_async(job_info=job_info)
    print("waiting for results:")
    for x in result.iterate(propagate=False):
        print("result is:" % x)
Project: celery-deadline    Author: chadrik    | Project source | File source
def test_fail():
    from celery import group
    job = group([add.s(2, 2), fail.s(), add.s(4, 4)])
    # job_info or plugin_info must be passed to trigger submission to deadline
    result = job.apply_async(job_info={})
    print("waiting for results:")
    # failure aborts iteration when propagate=True
    for x in result.iterate():
        print("result is:" % x)
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def __handle_processed_messages(self):
        """
        Perform any final housekeeping once all of the messages in the queue have been processed.
        :return: None
        """
        from tasknode.tasks import handle_scanning_order_from_pubsub
        logger.debug(
            "Now handling all processed messages (%s keys in targets)."
            % (len(self._targets),)
        )
        for k, v in self._targets.iteritems():
            if len(v) > 0:
                targets = list(set(v))
                task_sig = handle_scanning_order_from_pubsub.si(
                    org_uuid=k,
                    targets=targets,
                )
                task_sig.options["queue"] = config.celery_priority_queue_name
                self._tasks.append(task_sig)
                self._messages.append(
                    "Total of %s targets defined for organization %s."
                    % (len(targets), k)
                )
        logger.debug(
            "Total number of tasks to kick off is %s."
            % (len(self._tasks),)
        )
        if len(self._tasks) > 0:
            canvas_sig = group(self._tasks)
            canvas_sig.apply_async()
        logger.debug("Tasks kicked off.")
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def gather_data_for_domain_name(
        self,
        org_uuid=None,
        domain_uuid=None,
        domain_scan_uuid=None,
        domain_name=None,
        order_uuid=None,
):
    """
    Perform all data gathering for the given domain name.
    :param org_uuid: The UUID of the organization to retrieve data for.
    :param domain_uuid: The UUID of the parent domain name that is being investigated.
    :param domain_scan_uuid: The UUID of the domain name scan that this task is a part of.
    :param domain_name: The domain name to collect data for.
    :return: None
    """
    logger.info(
        "Now gathering information for domain name %s (parent domain %s)."
        % (domain_name, domain_uuid)
    )
    record_types = get_dns_record_types_for_scan()
    task_sigs = []
    for record_type, do_scanning in record_types:
        task_sigs.append(resolve_domain_name_for_organization.si(
            org_uuid=org_uuid,
            domain_uuid=domain_uuid,
            domain_scan_uuid=domain_scan_uuid,
            record_type=record_type,
            order_uuid=order_uuid,
        ))
    canvas_sig = group(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
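
The ws-backend-community examples build their canvases almost entirely from .si() calls. Unlike .s(), .si() creates an immutable signature, so a preceding task's result is not prepended to its arguments; a minimal contrast with illustrative tasks:

from celery import Celery, chain

app = Celery('sig_demo', broker='memory://')

@app.task
def produce():
    return 41

@app.task
def consume(x):
    return x + 1

# With .s(), consume receives produce's return value (41) as its argument;
# with .si(), the parent result is ignored and consume gets only the 42.
mutable_flow = chain(produce.s(), consume.s())
immutable_flow = chain(produce.s(), consume.si(42))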
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def get_whois_data_for_ip_address(
        self,
        org_uuid=None,
        ip_address_uuid=None,
        ip_address_scan_uuid=None,
        order_uuid=None,
):
    """
    Retrieve WHOIS data for the given IP address.
    :param org_uuid: The UUID of the organization to perform data retrieval on behalf of.
    :param ip_address_uuid: The UUID of the IP address to retrieve data about.
    :param ip_address_scan_uuid: The UUID of the IP address scan to associate retrieved data with.
    :return: None
    """
    logger.info(
        "Now retrieving WHOIS information for IP address %s."
        % (ip_address_uuid,)
    )
    task_sigs = []
    task_kwargs = {
        "org_uuid": org_uuid,
        "ip_address_uuid": ip_address_uuid,
        "ip_address_scan_uuid": ip_address_scan_uuid,
        "order_uuid": order_uuid,
    }
    task_sigs.append(get_arin_whois_data_for_ip_address.si(**task_kwargs))
    if len(task_sigs) > 1:
        collection_sig = group(task_sigs)
    else:
        collection_sig = task_sigs[0]
    self.finish_after(signature=collection_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def zmap_scan_order(self, order_uuid=None):
    """
    Perform Zmap scans for all necessary ports for the given order.
    :param order_uuid: The UUID of the order to scan.
    :return: None
    """
    port_tuples = get_ports_to_scan_for_scan_config(
        config_uuid=self.scan_config.uuid,
        db_session=self.db_session,
    )
    logger.info(
        "Now scanning order %s for %s total ports."
        % (order_uuid, len(port_tuples))
    )
    task_signatures = []
    scan_signatures = []
    network_scan = create_network_scan_for_organization(
        db_session=self.db_session,
        org_uuid=self.org_uuid,
    )
    self.commit_session()
    for port, protocol in port_tuples:
        scan_signatures.append(zmap_scan_order_for_port.si(
            port=port,
            protocol=protocol,
            order_uuid=order_uuid,
            network_scan_uuid=network_scan.uuid,
        ))
    task_signatures.append(group(scan_signatures))
    task_signatures.append(update_zmap_scan_completed.si(
        scan_uuid=network_scan.uuid,
        org_uuid=self.org_uuid,
        order_uuid=order_uuid,
    ))
    logger.info("Kicking off Zmap subtasks now.")
    canvas_sig = chain(task_signatures)
    canvas_sig.apply_async()


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def handle_placed_order(self, order_uuid=None):
    """
    Handle the placement of the given order.
    :param order_uuid: The UUID of the order that was placed.
    :return: None
    """
    logger.info(
        "Now handling the placement of order %s."
        % (order_uuid,)
    )
    task_sigs = []
    scan_config = self.scan_config
    if scan_config.scan_domain_names:
        domain_count = count_domains_for_order(db_session=self.db_session, order_uuid=order_uuid)
        logger.info(
            "Domain count for order %s is %s."
            % (order_uuid, domain_count)
        )
        if domain_count > 0:
            task_sigs.append(initiate_domain_scans_for_order.si(order_uuid=order_uuid, scan_endpoints=True))
    if scan_config.scan_network_ranges:
        network_count = count_networks_for_order(db_session=self.db_session, order_uuid=order_uuid)
        logger.info(
            "Networks count for order %s is %s."
            % (order_uuid, network_count)
        )
        if network_count > 0:
            task_sigs.append(initiate_network_scans_for_order.si(order_uuid=order_uuid, requeue=False))
    if len(task_sigs) > 0:
        task_sigs.append(handle_order_completion.si(order_uuid=order_uuid))
        canvas_sig = group(task_sigs)
        canvas_sig.apply_async()
        logger.info(
            "All scanning tasks for order %s kicked off successfully."
            % (order_uuid,)
        )
    else:
        logger.warning("No tasks were created as a result of call to handle_placed_order.")


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def screenshot_web_service(
        self,
        web_service_uuid=None,
        org_uuid=None,
        web_service_scan_uuid=None,
        order_uuid=None,
):
    """
    Take screenshots of all the relevant endpoints for the given web service.
    :param web_service_uuid: The UUID of the web service to take screenshots for.
    :param org_uuid: The UUID of the organization that owns the web service.
    :param web_service_scan_uuid: The UUID of the scan that this screenshotting is being done in.
    :return: None
    """
    logger.info(
        "Now taking screenshots of all relevant endpoints for web service %s. Organization is %s, scan is %s."
        % (web_service_uuid, org_uuid, web_service_scan_uuid)
    )
    url_paths = get_url_paths_to_screenshot(
        service_uuid=web_service_uuid,
        db_session=self.db_session,
        scan_uuid=web_service_scan_uuid,
    )
    logger.info(
        "A total of %s URL paths remain to be screenshotted for web service %s."
        % (len(url_paths), web_service_uuid)
    )
    task_sigs = []
    for url_path in url_paths:
        task_sigs.append(screenshot_web_service_url.si(
            web_service_uuid=web_service_uuid,
            org_uuid=org_uuid,
            web_service_scan_uuid=web_service_scan_uuid,
            url_path=url_path,
            order_uuid=order_uuid,
        ))
    canvas_sig = group(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def enumerate_user_agent_fingerprints_for_web_service(
        self,
        org_uuid=None,
        web_service_uuid=None,
        web_service_scan_uuid=None,
        order_uuid=None,
):
    """
    Perform fingerprinting for the given web service to determine if different user agents result in different
    responses being returned.
    :param org_uuid: The UUID of the organization to fingerprint the web service on behalf of.
    :param web_service_uuid: The UUID of the web service to gather fingerprints for.
    :param web_service_scan_uuid: The UUID of the web service scan to perform fingerprinting for.
    :return: None
    """
    logger.info(
        "Now enumerating user agent fingerprints for web service scan %s."
        % (web_service_scan_uuid,)
    )
    user_agents_file = UserAgentCsvFileWrapper.from_default_file()
    task_sigs = []
    for user_agent in user_agents_file.user_agents:
        task_sigs.append(get_user_agent_fingerprint_for_web_service.si(
            org_uuid=org_uuid,
            web_service_uuid=web_service_uuid,
            web_service_scan_uuid=web_service_scan_uuid,
            user_agent_type=user_agent.agent_type,
            user_agent_name=user_agent.agent_name,
            user_agent_string=user_agent.user_agent,
            order_uuid=order_uuid,
        ))
    canvas_sig = group(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def update_latest_web_service_reports_for_organization(self, org_uuid=None):
    """
    Update all of the web service reports for the given organization based on the current state of the web
    service inspector.
    :param org_uuid: The UUID of the organization to update web service reports for.
    :return: None
    """
    logger.info(
        "Now updating all web service reports for organization %s."
        % (org_uuid,)
    )
    report_ids = get_latest_web_service_report_ids(org_uuid)
    logger.info(
        "Total of %s web service reports found for organization %s."
        % (len(report_ids), org_uuid)
    )
    task_sigs = []
    for report_id in report_ids:
        task_sigs.append(update_web_service_report_for_organization.si(
            doc_id=report_id,
            org_uuid=org_uuid,
            is_latest=True,
        ))
    canvas_sig = group(task_sigs)
    logger.info(
        "Kicking off a total of %s tasks to update web service reports for organization %s."
        % (len(task_sigs), org_uuid)
    )
    self.finish_after(signature=canvas_sig)
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def fingerprint_virtual_hosts(
        self,
        org_uuid=None,
        network_service_uuid=None,
        network_service_scan_uuid=None,
        use_ssl=None,
        order_uuid=None,
):
    """
    Perform fingerprinting for virtual hosts for the given network service.
    :param org_uuid: The UUID of the organization to perform fingerprinting on behalf of.
    :param network_service_uuid: The UUID of the network service to fingerprint.
    :param network_service_scan_uuid: The UUID of the network service scan that this fingerprinting is a part
    of.
    :param use_ssl: Whether or not to use SSL to connect to the remote endpoint.
    :return: None
    """
    logger.info(
        "Now starting to fingerprint virtual hosts for service %s. Organization is %s."
        % (network_service_uuid, org_uuid)
    )
    domain_names = get_all_domains_for_organization(org_uuid=org_uuid, db_session=self.db_session)
    task_sigs = []
    for domain_name in domain_names:
        task_sigs.append(fingerprint_virtual_host.si(
            org_uuid=org_uuid,
            network_service_uuid=network_service_uuid,
            network_service_scan_uuid=network_service_scan_uuid,
            use_ssl=use_ssl,
            hostname=domain_name,
        ))
    logger.info(
        "Now kicking off a total of %s tasks to fingerprint service %s."
        % (len(task_sigs), network_service_uuid)
    )
    canvas_sig = group(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def update_latest_ssl_support_reports_for_organization(self, org_uuid=None):
    """
    Update all of the ssl support reports for the given organization based on the current state of the SSL support
    inspector.
    :param org_uuid: The UUID of the organization to update SSL support reports for.
    :return: None
    """
    logger.info(
        "Now updating all of the latest SSL support reports for organization %s."
        % (org_uuid,)
    )
    report_ids = get_latest_ssl_support_report_ids(org_uuid)
    logger.info(
        "Total of %s SSL support reports found for organization %s."
        % (len(report_ids), org_uuid)
    )
    task_sigs = []
    for report_id in report_ids:
        task_sigs.append(update_latest_ssl_support_report_for_organization.si(
            doc_id=report_id,
            org_uuid=org_uuid,
            is_latest=True,
        ))
    canvas_sig = group(task_sigs)
    logger.info(
        "Now kicking off %s tasks to update SSL support reports for organization %s."
        % (len(task_sigs), org_uuid)
    )
    self.finish_after(signature=canvas_sig)
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def enumerate_vulnerabilities_for_ssl_service(
        self,
        org_uuid=None,
        network_service_uuid=None,
        network_service_scan_uuid=None,
        order_uuid=None,
):
    """
    Enumerate all of the SSL-based vulnerabilities for the given SSL/TLS service.
    :param org_uuid: The UUID of the organization to enumerate SSL vulnerabilities on behalf of.
    :param network_service_uuid: The UUID of the network service that is being scanned.
    :param network_service_scan_uuid: The UUID of the network service scan that this enumeration is
    a part of.
    :return: None
    """
    logger.info(
        "Now enumerating SSL vulnerabilities for network service %s."
        % (network_service_uuid,)
    )
    task_sigs = []
    command_map = get_ssl_vulnerabilities_command_map()
    for command_name in command_map.keys():
        task_sigs.append(test_ssl_service_for_ssl_vulnerability.si(
            org_uuid=org_uuid,
            network_service_uuid=network_service_uuid,
            network_service_scan_uuid=network_service_scan_uuid,
            vulnerability_name=command_name,
            order_uuid=order_uuid,
        ))
    canvas_sig = group(task_sigs)
    logger.info(
        "Now kicking off %s tasks to inspect network service %s for SSL vulnerabilities."
        % (len(task_sigs), network_service_uuid)
    )
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def redo_ssl_support_inspection_for_organization(self, org_uuid=None):
    """
    Perform SSL support inspection for all of the network services associated with the given organization
    again.
    :param org_uuid: The UUID of the organization to re-do SSL support inspection for.
    :return: None
    """
    logger.info(
        "Now redo'ing SSL support inspection for organization %s."
        % (org_uuid,)
    )
    network_service_scan_uuids = get_latest_network_service_scan_uuids_for_organization(
        org_uuid=org_uuid,
        db_session=self.db_session,
    )
    task_sigs = []
    for network_service_scan_uuid in network_service_scan_uuids:
        task_sigs.append(redo_ssl_support_inspection_for_network_service_scan.si(
            network_service_scan_uuid=network_service_scan_uuid,
        ))
    canvas_sig = group(task_sigs)
    logger.info(
        "Now kicking off %s tasks to redo SSL inspection for organization %s."
        % (len(task_sigs), org_uuid)
    )
    self.finish_after(signature=canvas_sig)
Project: VkGraph    Author: Djaler    | Project source | File source
def get_friends_ids_batch(user_ids: List[int]) -> List[List[int]]:
    job = group([tasks.get_friends_ids_batch.s(chunk) for chunk in
                 chunks(user_ids, 75)])

    result = job.apply_async().join()

    full_result = []

    for list_ in result:
        full_result.extend(list_)

    return full_result
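
chunks here is imported from elsewhere in the project; a minimal equivalent, assuming it simply slices the id list into consecutive pieces of at most the given size:

def chunks(seq, size):
    # Split seq into consecutive slices of at most `size` items.
    return [seq[i:i + size] for i in range(0, len(seq), size)]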
Project: VkGraph    Author: Djaler    | Project source | File source
def get_mutual_friends_ids_batch(user_ids: List[int],
                                 my_id: int) -> Dict[int, List[int]]:
    job = group(
        [tasks.get_mutual_friends_ids_batch.s(chunk, my_id) for chunk in
         chunks(user_ids, 75)])

    result = job.apply_async().join()

    full_result = {int(key): value for dictionary in result for key, value in
                   dictionary.items()}

    return full_result
Project: CA-NEAT    Author: mathiasose    | Project source | File source
def initialize_generation(db_path: str, scenario_id: int, generation: int, genotypes: List[Genome],
                          pair_selection_f: PAIR_SELECTION_F_T, fitness_f: FITNESS_F_T, neat_config: CPPNNEATConfig,
                          ca_config: CAConfig, innovation_archive: List[Genome]) -> None:
    from celery import group, chord

    serialized = {}
    distances = {}

    k = 15

    concurrent_tasks = []
    for i, gt in enumerate(genotypes):
        l = []
        for other_gt in genotypes + innovation_archive:
            if other_gt is gt:
                continue

            key = tuple(sorted((gt, other_gt), key=id))

            if key in distances:
                pass
            else:
                if gt not in serialized:
                    pt = create_feed_forward_phenotype(gt)
                    _, serialized[gt] = serialize_cppn_rule(cppn=pt, ca_config=ca_config)

                if other_gt not in serialized:
                    pt = create_feed_forward_phenotype(other_gt)
                    _, serialized[other_gt] = serialize_cppn_rule(cppn=pt, ca_config=ca_config)

                a = serialized[gt]
                b = serialized[other_gt]
                distances[key] = hamming(a, b, normalized=True)

            l.append(distances[key])

        gt.fitness = sum(sorted(l)[:k]) / k

        concurrent_tasks.append(handle_individual.s(
            scenario_id=scenario_id,
            generation=generation,
            individual_number=i,
            genotype=gt,
            fitness_f=fitness_f,
            ca_config=ca_config,
        ))

    final_task = persist_results.subtask(
        args=(db_path, scenario_id, generation, fitness_f, pair_selection_f, neat_config, ca_config)
    )

    chord(group(concurrent_tasks), final_task)()
Project: capillary    Author: celery-capillary    | Project source | File source
def lazy_async_apply_map(self, items, d, runner):
    """Make new instances of runner for each item in items, and inject that
    group into the chord that executed this task.

    NB This task does not work with eager results
    NB This task does not work with celery==3.1, only on master

    :param items: iterable of arguments for the runner
    :param d: data to operate on (probably returned by a previous task)
    :param runner: task signature to execute on each item. def runner(item, data, *a, **kw)
    """

    subtasks = []
    for item in items:
        r = runner.clone()
        r.args = (item, d) + r.args
        subtasks.append(r)
    g = group(*subtasks)

    if self.request.is_eager:
        # Maybe this works - sometimes, if the argument count is right
        return g.apply().get()

    try:
        # Celery master (>= 3.2)
        raise self.replace(g)
    except AttributeError:
        pass

    # Try to do it ourselves for celery == 3.1
    # FIXME - not quite working

    # TODO - a bit hacky, reducer should be parameterized
    g = group(*subtasks) | generator.s().set(
        # task_id=self.request.id,
        chord=self.request.chord,
    )
    # | dict_reducer.s().set(
    #     task_id=self.request.id,
    #     chord=self.request.chord,
    #     reply_to=self.request.reply_to,
    # )

    # Replace running task with the group
    # inspired by task.replace from Celery master (3.2)
    g.freeze(
        self.request.id,
        group_id=self.request.group,
        # chord=self.request.chord,
        # reply_to=self.request.reply_to,
    )
    g.delay()
    raise Ignore('Chord member replaced by new task')
Project: django-user-tasks    Author: edx    | Project source | File source
def _create_chord(self, eager):
        """Create a celery chord and verify some assertions about the corresponding status records"""
        chord([
            sample_task.s(self.user.id, '1'),
            sample_task.s(self.user.id, '2', user_task_name='Chord: 1 & 2, then 3')
        ])(sample_task.s(self.user.id, '3'))
        assert UserTaskStatus.objects.count() == 5
        chord_status = UserTaskStatus.objects.get(task_class='celery.chord')
        assert chord_status.task_id
        assert chord_status.parent is None
        assert chord_status.is_container
        assert chord_status.name == 'Chord: 1 & 2, then 3'
        assert chord_status.total_steps == 3
        verify_state(chord_status, eager)

        group_status = UserTaskStatus.objects.get(task_class='celery.group')
        assert group_status.task_id
        assert group_status.parent_id == chord_status.id
        assert group_status.is_container
        assert group_status.name == 'Chord: 1 & 2, then 3'
        assert group_status.total_steps == 2
        verify_state(group_status, eager)

        header_tasks = UserTaskStatus.objects.filter(parent=group_status)
        assert len(header_tasks) == 2
        for status in header_tasks:
            assert status.task_id
            assert status.parent_id == group_status.id
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2']
            assert status.total_steps == 1
            verify_state(status, eager)

        body_status = UserTaskStatus.objects.get(parent=chord_status, is_container=False)
        assert body_status.task_id
        assert body_status.name == 'SampleTask: 3'
        assert body_status.total_steps == 1
        verify_state(body_status, eager)
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def enumerate_subdomains_for_domain(
        self,
        org_uuid=None,
        domain_uuid=None,
        domain_name=None,
        domain_scan_uuid=None,
        order_uuid=None,
):
    """
    Enumerate subdomains for the given domain name and associate the results with the given domain UUID.
    :param org_uuid: The UUID of the organization to perform the task for.
    :param domain_uuid: The UUID of the parent domain that this subdomain scan is invoked on behalf of.
    :param domain_name: The domain name to enumerate subdomains for.
    :param order_uuid: The UUID of the order that this enumeration is associated with.
    :return: None
    """
    logger.info(
        "Now enumerating subdomains for domain name %s (parent domain %s)."
        % (domain_name, domain_uuid)
    )
    try:
        parent_domain = get_parent_domain_for_subdomain_discovery(domain_name)
    except UnsupportedTldException:
        logger.warning(
            "The domain %s contains a TLD that we do not support."
            % (domain_name,)
        )
        return
    task_sigs = []
    discovery_sigs = []
    task_kwargs = {
        "org_uuid": org_uuid,
        "domain_uuid": domain_uuid,
        "domain_scan_uuid": domain_scan_uuid,
        "parent_domain": parent_domain,
        "order_uuid": order_uuid,
    }
    discovery_sigs.append(enumerate_subdomains_by_dnsdb.si(**task_kwargs))
    task_sigs.append(group(discovery_sigs))
    task_sigs.append(create_and_inspect_domains_from_subdomain_enumeration.si(**task_kwargs))
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def create_and_inspect_domains_from_subdomain_enumeration(
        self,
        org_uuid=None,
        domain_uuid=None,
        domain_scan_uuid=None,
        parent_domain=None,
        order_uuid=None,
):
    """
    Process the contents of all subdomain enumerations for the given domain name scan, create new domains
    for those subdomains that are new, and invoke scans for the domains as necessary.
    :param org_uuid: The UUID of the organization that subdomains were enumerated for.
    :param domain_uuid: The UUID of the domain name related to this inspection.
    :param domain_scan_uuid: The UUID of the domain name scan that this enumeration is a part of.
    :param parent_domain: The parent domain that was queried.
    :return: None
    """
    logger.info(
        "Now creating an inspecting domains from subdomain enumeration of parent domain %s."
        % (parent_domain,)
    )
    self.wait_for_es()
    subdomains = get_all_subdomains_from_domain_scan_enumeration(
        org_uuid=org_uuid,
        parent_domain=parent_domain,
        domain_scan_uuid=domain_scan_uuid,
    )
    task_sigs = []
    for subdomain in subdomains:
        domain_name = get_or_create_domain_name_for_organization(
            db_session=self.db_session,
            name=subdomain,
            added_by="subdomain_enum",
            org_uuid=org_uuid,
        )
        self.db_session.add(domain_name)
        do_scan = check_domain_name_scanning_status(
            db_session=self.db_session,
            domain_uuid=domain_name.uuid,
            update_status=False,
        )
        if do_scan:
            task_sigs.append(scan_domain_name.si(
                org_uuid=org_uuid,
                domain_uuid=domain_name.uuid,
                enumerate_subdomains=False,
            ))
    self.db_session.commit()
    canvas_sig = group(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def scan_ip_addresses_for_domain_name_scan(
        self,
        org_uuid=None,
        domain_uuid=None,
        domain_scan_uuid=None,
        order_uuid=None,
):
    """
    Kick off tasks for scanning all of the IP addresses discovered during the given domain name scan.
    :param org_uuid: The UUID of the organization to scan endpoints for.
    :param domain_uuid: The UUID of the domain name that was scanned.
    :param domain_scan_uuid: The UUID of the domain name scan to kick off endpoint scanning tasks
    for.
    :return: None
    """
    logger.info(
        "Now kicking off all tasks for scanning IP addresses associated with domain %s."
        % (domain_uuid,)
    )
    ip_addresses = get_ip_addresses_from_domain_name_scan(domain_scan_uuid=domain_scan_uuid, org_uuid=org_uuid)
    if len(ip_addresses) == 0:
        logger.info(
            "No IP addresses discovered for domain %s during scan %s."
            % (domain_uuid, domain_scan_uuid)
        )
        return
    task_sigs = []
    domain = self.domain
    for ip_address in ip_addresses:
        ip_address_model = get_ip_address_for_organization(
            db_session=self.db_session,
            org_uuid=org_uuid,
            ip_address=ip_address,
        )
        domain.ip_addresses.append(ip_address_model)
        task_sigs.append(scan_ip_address.si(
            org_uuid=org_uuid,
            ip_address_uuid=ip_address_model.uuid,
            order_uuid=order_uuid,
        ))
    group(task_sigs).apply_async()


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def scan_ip_address_for_network_services(
        self,
        org_uuid=None,
        ip_address_uuid=None,
        ip_address_scan_uuid=None,
        order_uuid=None,
):
    """
    Scan the given IP address to determine what network services are live on the host.
    :param org_uuid: The UUID of the organization to perform data retrieval on behalf of.
    :param ip_address_uuid: The UUID of the IP address to retrieve data about.
    :param ip_address_scan_uuid: The UUID of the IP address scan to associate retrieved data with.
    :return: None
    """
    logger.info(
        "Now scanning IP address %s for live network services."
        % (ip_address_uuid,)
    )
    task_sigs = []
    tcp_ports = get_tcp_ports_to_scan_for_scan_config(config_uuid=self.scan_config.uuid, db_session=self.db_session)
    if len(tcp_ports) > 0:
        task_sigs.append(scan_ip_address_for_tcp_network_services.si(
            org_uuid=org_uuid,
            ip_address_uuid=ip_address_uuid,
            ip_address_scan_uuid=ip_address_scan_uuid,
            ports=tcp_ports,
            order_uuid=order_uuid,
        ))
    udp_ports = get_udp_ports_to_scan_for_scan_config(config_uuid=self.scan_config.uuid, db_session=self.db_session)
    if len(udp_ports) > 0:
        task_sigs.append(scan_ip_address_for_udp_network_services.si(
            org_uuid=org_uuid,
            ip_address_uuid=ip_address_uuid,
            ip_address_scan_uuid=ip_address_scan_uuid,
            ports=udp_ports,
            order_uuid=order_uuid,
        ))
    if len(task_sigs) == 0:
        logger.info(
            "No ports were included to scan for the organization (%s)."
            % (org_uuid,)
        )
        return
    if len(task_sigs) > 1:
        scanning_sig = group(task_sigs)
    else:
        scanning_sig = task_sigs[0]
    self.finish_after(signature=scanning_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def inspect_network_services_from_ip_address(
        self,
        org_uuid=None,
        ip_address_uuid=None,
        ip_address_scan_uuid=None,
        order_uuid=None,
):
    """
    Kick off all of the necessary tasks to inspect the live network services associated with the given IP
    address.
    :param org_uuid: The UUID of the organization to perform data retrieval on behalf of.
    :param ip_address_uuid: The UUID of the IP address to retrieve data about.
    :param ip_address_scan_uuid: The UUID of the IP address scan to associate retrieved data with.
    :return: None
    """
    logger.info(
        "Now kicking off all tasks to inspect network services on IP address %s."
        % (ip_address_uuid,)
    )
    self.wait_for_es()
    open_ports = get_open_ports_from_ip_address_scan(ip_address_scan_uuid=ip_address_scan_uuid, org_uuid=org_uuid)
    task_sigs = []
    for port_number, port_protocol in open_ports:
        network_service = get_or_create_network_service_from_org_ip(
            ip_uuid=ip_address_uuid,
            port=port_number,
            protocol=port_protocol,
            db_session=self.db_session,
            discovered_by="ip address scan",
        )
        task_sigs.append(scan_network_service.si(
            org_uuid=org_uuid,
            network_service_uuid=network_service.uuid,
            check_liveness=False,
            liveness_cause="ip address scan",
            order_uuid=order_uuid,
        ))
    if len(task_sigs) == 0:
        logger.info(
            "No network services were found to be open for IP address %s."
            % (ip_address_uuid,)
        )
        return
    group(task_sigs).apply_async()


#USED
Project: ws-backend-community    Author: lavalamp-    | Project source | File source
def handle_order_completion(self, order_uuid=None, retry_interval=10, completion_count=1):
    """
    Check to see if the order associated with the given UUID has completed and, if it has, handle the completion
    of the order.
    :param order_uuid: The UUID of the order to check on.
    :param retry_interval: The time (in seconds) between checking on whether or not the referenced
    order has completed.
    :param completion_count: The number of outstanding tasks associated with an order that should indicate
    that the order has finished.
    :return: None
    """
    logger.info(
        "Now checking to see if order %s has completed."
        % (order_uuid,)
    )
    order_uuid_value = int(self.redis_helper.get(order_uuid))
    if order_uuid_value == completion_count:
        logger.info(
            "Order %s has completed!"
            % (order_uuid,)
        )
        scan_config = self.scan_config
        task_sigs = []
        if scan_config.completion_email_org_users:
            org = self.order.organization
            task_sigs.append(email_org_users_for_order_completion.si(
                order_uuid=order_uuid,
                org_uuid=org.uuid,
                org_name=org.name,
            ))
        elif scan_config.completion_email_order_user:
            org = self.order.organization
            task_sigs.append(email_order_user_for_order_completion.si(
                order_uuid=order_uuid,
                org_uuid=org.uuid,
                org_name=org.name,
            ))
        if scan_config.completion_web_hook_url:
            task_sigs.append(request_web_hook_for_order_completion.si(order_uuid=order_uuid))
        if len(task_sigs) > 0:
            canvas_sig = group(task_sigs)
            logger.info(
                "Now kicking off %s tasks to handle the completion of order %s."
                % (len(task_sigs), order_uuid)
            )
            self.finish_after(signature=canvas_sig)
        else:
            logger.info(
                "No tasks to run in response to completion of order %s."
                % (order_uuid,)
            )
    else:
        logger.info(
            "Order %s has not completed yet (%s tasks currently outstanding)."
            % (order_uuid, order_uuid_value,)
        )
        raise self.retry(countdown=retry_interval)


#USED