Python celery module: chain() example source code

We extracted the following 29 code examples from open-source Python projects to illustrate how to use celery.chain().
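
Before the project examples, here is a minimal, self-contained sketch of the core behavior: chain() links signatures so that each task's return value becomes the first argument of the next task. The app setup, broker URL, and task names (add, mul) are illustrative, not taken from any project below.

from celery import Celery, chain

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

@app.task
def mul(x, y):
    return x * y

# add(2, 2) runs first; its result (4) becomes the first argument of
# mul, so the chain computes mul(add(2, 2), 8) == 32.
result = chain(add.s(2, 2), mul.s(8)).apply_async()
print(result.get())  # 32 (requires a running broker and worker)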

Project: pia    Author: soasme    | project source | file source
def async_run_pipe(jsondata, programs, env=None):
    """ Run an sequence of jobs asynchronously.

    :param jsondata: a dictionary that will be sent as request body.
    :param programs: a list of dictionaries, each defining request meta and parameters.
    :param env: a dictionary containing environment variables.
    :return: AsyncResult for the chained celery tasks.
    """
    env = env or {}

    # assemble celery tasks
    tasks = []
    tasks.append(async_run_job.s(jsondata, programs[0], env))
    for prog in programs[1:]:
        tasks.append(async_run_job.s(prog, env))

    # run the chained jobs using celery
    return chain(*tasks)()
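
A note on the pattern above: only the first signature receives jsondata because chain() feeds each task's return value to the next task as its first positional argument, so the later async_run_job.s(prog, env) signatures are deliberately partial. A hedged sketch of the same mechanism with a hypothetical task:

from celery import Celery, chain

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def step(previous_result, label):
    # the first positional argument is injected by the chain at runtime
    return '%s->%s' % (previous_result, label)

workflow = chain(step.s('start', 'a'), step.s('b'), step.s('c'))
print(workflow.apply_async().get())  # 'start->a->b->c'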
Project: PilosusBot    Author: pilosus    | project source | file source
def celery_chain(parsed_update):
    """
    Celery chain of tasks; each task in the chain is executed after the previous one is done.

    :param parsed_update: dict
    :return: dict (with 'status_code' and 'status' keys)
    """
    chain_result = chain(assess_message_score.s(parsed_update),
                         select_db_sentiment.s(),
                         send_message_to_chat.s()).apply_async()
    return chain_result


# assess queue
Project: falsy    Author: pingf    | project source | file source
def loads(payload):
    if payload.get('type') != 'normal':
        raise Exception('celery task loader only supports normal mode')
    tasks = payload.get('tasks', [])
    cts = []
    for task in tasks:
        ops = [
            load(id, task.get('args'), task.get('on_error')) if i == 0
            else load(id, None, task.get('on_error'))
            for i, id in enumerate(task['ids'])
        ]
        cts.append(chain(ops))
    callback = payload.get('callback')
    if callback:
        return chord(header=group(cts), body=func.load(callback).s())
    return group(cts)
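
loads() above composes all three canvas primitives: each task list becomes a chain, the chains run in parallel inside a group, and an optional callback turns the group into a chord that fires once every chain finishes. A minimal hedged sketch of that header/body structure (task names are illustrative):

from celery import Celery, chain, chord, group

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def double(n):
    return n * 2

@app.task
def collect(results):
    # the chord body receives the list of header results
    return sum(results)

header = group(chain(double.s(1), double.s()),
               chain(double.s(2), double.s()))
print(chord(header=header, body=collect.s()).apply_async().get())  # 4 + 8 = 12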
Project: instanotifier    Author: AlexanderKaluzhny    | project source | file source
def test_consume_feed_task_chaining():
    """ Consume test rss feed through connected fetcher and parser and publisher tasks.
        Make sure the parser have created the RssNotification instances, and
        all the messages were sent.

        It should be run from under the shell/script.
    """

    from celery import chain
    from instanotifier.fetcher.tasks import fetch
    from instanotifier.parser.tasks import parse
    from instanotifier.fetcher.rss.utils import _rss_file_path

    with TestFeedSourceAutoCleanupContext() as context:
        original_notification_count = RssNotification.objects.count()
        print('Original notifications count: %s' % original_notification_count)

        task_flow = chain(fetch.s(_rss_file_path()), parse.s(), publish.s(context.feedsource_pk))
        task_flow.delay().get()

        actual_notification_count = RssNotification.objects.count()
        print('Actual notifications count: %s' % actual_notification_count)

        assert (actual_notification_count > original_notification_count)
Project: instanotifier    Author: AlexanderKaluzhny    | project source | file source
def test_consume_feed():
    """ Consume test rss feed through connected fetcher and parser tasks.
        And make sure the parser have created the RssNotification instances.
        It should be run from under the shell/script.
    """

    from celery import chain
    from instanotifier.fetcher.tasks import fetch
    from instanotifier.fetcher.rss.utils import _rss_file_path

    original_notification_count = RssNotification.objects.count()
    print('Original notifications count: %s' % original_notification_count)

    task_flow = chain(fetch.s(_rss_file_path()), tasks.parse.s())
    saved_pks = task_flow.delay().get()

    actual_notification_count = RssNotification.objects.count()
    print('Actual notifications count: %s' % actual_notification_count)

    print('Number of saved notifications: %s' % len(saved_pks))
    assert (len(saved_pks) == actual_notification_count - original_notification_count)
Project: django-user-tasks    Author: edx    | project source | file source
def _create_chain(self, eager):
        """Create a celery chain and verify some assertions about the corresponding status records"""
        chain(sample_task.si(self.user.id, '1'),
              sample_task.si(self.user.id, '2', user_task_name='Chain: 1, 2, 3'),
              sample_task.si(self.user.id, '3'),
              normal_task.si('Argument')).delay()
        assert UserTaskStatus.objects.count() == 4
        chain_status = UserTaskStatus.objects.get(task_class='celery.chain')
        assert chain_status.task_id
        assert chain_status.parent is None
        assert chain_status.is_container
        assert chain_status.name == 'Chain: 1, 2, 3'
        assert chain_status.total_steps == 3
        verify_state(chain_status, eager)

        children = UserTaskStatus.objects.filter(parent=chain_status)
        assert len(children) == 3
        for status in children:
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2', 'SampleTask: 3']
            assert status.total_steps == 1
            verify_state(status, eager)
Project: kostyor    Author: Mirantis    | project source | file source
def start(self):
        subtasks = [self.driver.pre_upgrade()]
        hosts = dbapi.get_hosts_by_cluster(self._upgrade['cluster_id'])

        # We may have plenty of controllers, each with a different set of
        # services. In order to orchestrate upgrades properly, we need to
        # iterate over them in the right order: for example, first come
        # controllers with keystone, then those with nova, and so on. See
        # the get_controllers() docstring for iteration details.
        for host in iterhosts(hosts):
            for service in iterservices(host):
                subtasks.append(self.driver.start(service, [host]))

        # Execute the gathered tasks one by one, preserving order. Note that
        # this doesn't rule out parallel execution: the driver may return a
        # Celery group of tasks, in which case the whole group is executed as
        # a single step. The idea is to run, one by one, whatever the driver
        # returned, regardless of whether it's a task, a group or a chain.
        supertask = celery.chain(*subtasks)
        supertask.apply_async()
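
As the comment in start() notes, a single step in a chain can itself be a group, in which case that step's members run in parallel before the chain moves on. A hedged sketch of the mixed shape the driver may produce (task names are hypothetical):

from celery import Celery, chain, group

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def pre_upgrade():
    return 'pre-upgrade done'

@app.task
def upgrade_service(name):
    return 'upgraded %s' % name

# The group step fans out in parallel; the chain still runs its
# top-level steps strictly one after another.
supertask = chain(pre_upgrade.si(),
                  group(upgrade_service.si('keystone'),
                        upgrade_service.si('nova')))
supertask.apply_async()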
Project: kostyor    Author: Mirantis    | project source | file source
def start(self):
        subtasks = [self.driver.pre_upgrade()]

        for project in SCENARIO:
            for service in project.services:
                service, hosts = dbapi.get_service_with_hosts(
                    service.name,
                    self._upgrade['cluster_id'])

                # SCENARIO may contain services that are not deployed in the
                # current setup. In that case the attempt to retrieve the
                # service instance and its hosts returns None, and we have no
                # choice but to ignore it and continue.
                if service:
                    subtasks.append(self.driver.start(service, hosts))

        # Execute the gathered tasks one by one, preserving order. Note that
        # this doesn't rule out parallel execution: the driver may return a
        # Celery group of tasks, in which case the whole group is executed as
        # a single step. The idea is to run, one by one, whatever the driver
        # returned, regardless of whether it's a task, a group or a chain.
        supertask = celery.chain(*subtasks)
        supertask.apply_async()
Project: posm-imagery-api    Author: mojodna    | project source | file source
def initialize_imagery(id, source_path):
    return chain(
        place_file.si(id, source_path),
        create_metadata.si(id),
        chord([create_overviews.si(id), create_warped_vrt.si(id)],
              chain(
                update_metadata.si(id),
                cleanup_ingestion.si(id),
              )
        ),
    )
Project: odl-video-service    Author: mitodl    | project source | file source
def process_dropbox_data(dropbox_upload_data):
    """
    Takes care of processing a list of videos to be uploaded from Dropbox.

    Args:
        dropbox_upload_data (dict): a dictionary containing the collection key and a list of Dropbox links

    Returns:
        dict: a dictionary mapping each video's hex key to information about the video
    """
    collection_key = dropbox_upload_data['collection']
    dropbox_links_list = dropbox_upload_data['files']
    collection = get_object_or_404(models.Collection, key=collection_key)
    response_data = {}
    for dropbox_link in dropbox_links_list:
        with transaction.atomic():
            video = models.Video.objects.create(
                source_url=dropbox_link["link"],
                title=dropbox_link["name"],
                collection=collection,
            )
            models.VideoFile.objects.create(
                s3_object_key=video.get_s3_key(),
                video_id=video.id,
                bucket_name=settings.VIDEO_S3_BUCKET
            )
        # Kick off chained async celery tasks to transfer file to S3, then start a transcode job
        task_result = chain(
            tasks.stream_to_s3.s(video.id),
            tasks.transcode_from_s3.si(video.id)
        )()

        response_data[video.hexkey] = {
            "s3key": video.get_s3_key(),
            "title": video.title,
            "task": task_result.id,
        }
    return response_data
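
The chain above mixes both signature types on purpose: stream_to_s3.s(video.id) is a regular signature, while transcode_from_s3.si(video.id) is immutable, so the return value of the S3 transfer is not prepended to the transcode task's arguments. A hedged sketch of the difference (task names are illustrative):

from celery import Celery, chain

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def produce():
    return 'payload'

@app.task
def consume(*args):
    return args

# consume.s() receives the previous result: args == ('payload',)
chain(produce.s(), consume.s()).apply_async()

# consume.si() is immutable and ignores it: args == ()
chain(produce.s(), consume.si()).apply_async()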
Project: instanotifier    Author: AlexanderKaluzhny    | project source | file source
def test_consume_feed_task_chaining():
    """ Consume test rss feed through connected fetcher and parser and publisher tasks.
        Make sure the parser have created the RssNotification instances, and
        all the messages were sent.

        It should be run from under the django shell/script.
    """
    delete_test_rss_feed_notifications()

    task_flow = chain(fetch.s(), parse.s(), publish.s())
    task_flow.delay(_rss_file_path()).get()
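
Unlike the earlier variant that bakes _rss_file_path() into the first signature, this version leaves fetch.s() empty and passes the path to delay() instead: arguments given to delay() or apply_async() on a chain are forwarded to its first task. A small hedged sketch of the two equivalent spellings (task bodies are illustrative):

from celery import Celery, chain

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def fetch(path):
    return '<rss from %s>' % path

@app.task
def parse(feed):
    return [feed]

# Equivalent: the argument can live in the first signature ...
chain(fetch.s('feed.xml'), parse.s()).delay()
# ... or be supplied at call time and forwarded to the first task.
chain(fetch.s(), parse.s()).delay('feed.xml')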
Project: instanotifier    Author: AlexanderKaluzhny    | project source | file source
def _get_chain(feedsource_pk):
    return chain(fetch.s(), parse.s(), publish.s(feedsource_pk))
Project: celery-deadline    Author: chadrik    | project source | file source
def test():
    # print(add.delay(2, 2))

    from celery import group, chain

    job_info = {
        'Name': '{task_name}{task_args}',
        'BatchName': 'celery-{root_id}'
    }

    job = group([add.s(2, 2), add.s(4, 4)])
    result = job.apply_async(job_info=job_info)
    print("waiting for results:")
    for x in result.iterate(propagate=False):
        print("result is:" % x)
Project: celery-deadline    Author: chadrik    | project source | file source
def test_mixed():
    from celery import chain
    from celery_deadline import job

    task = chain(
        job('Python', '1-2',
            plugin_info=dict(
                ScriptFile='/Users/chad/python/untitled.py',
                Version='2.7')),
        sum.s()  # <-- will go to a celery worker
    )
    result = task.apply_async()
    print("waiting for results:")
    print(result.get())
Project: capillary    Author: celery-capillary    | project source | file source
def run(self, args=None, kwargs=None, **options):
        """
        Executes the pipeline and returns the chain of tasks used.

        Evaluates all the `after` parameters from the pipeline decorators to create
        a DAG, and then a partial ordering that ensures all prerequisites are met
        before launching tasks.

        The return value of each task in the pipeline is provided to the next
        task as its first positional argument.

        By tagging pipeline elements in their decorators, you can choose which
        elements should be run by :meth:`run`.

        :param list args: Arguments passed as an input to the kickoff (first) task
                          in the pipeline
        :param dict kwargs: Keyword arguments passed as an input to the kickoff
                            (first) task in the pipeline
        :param list tagged_as: Execute only tasks with no tags and those tagged as
                               specified using `tags` parameters to :func:`@pipeline`
        :param dict required_kwargs: Keyword arguments that :func:`@pipeline`
                                     elements might require.

        :returns: :class:`celery.AsyncResult`
        :raises: :exc:`DependencyError`
        """
        tasks = self._get_pipeline(**options)
        return tasks.apply_async(args=args, kwargs=kwargs)
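
Based solely on the parameters documented in the docstring above, a call might look like the following sketch; the pipeline object and the tag and kwarg names are hypothetical, and the real setup (registering @pipeline elements) happens elsewhere in capillary:

# `pipeline` is assumed to be an already-configured capillary pipeline
# object whose elements were registered with the @pipeline decorator.
result = pipeline.run(
    args=['input-document'],           # fed to the kickoff (first) task
    tagged_as=['html'],                # untagged tasks plus those tagged 'html'
    required_kwargs={'user': 'demo'},  # extra kwargs elements may require
)
print(result.get())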
Project: capillary    Author: celery-capillary    | project source | file source
def _get_pipeline(self, **options):
        tagged_as = options.pop('tagged_as', [])
        required_kwargs = options.pop('required_kwargs', {})

        # get tasks for default tag
        # Explicit dict for copy? amcm
        tasks = dict(self.registry[_sentinel])

        # override tasks by adding tasks in correct order
        for tag in tagged_as:
            if tag not in self.registry:
                raise ValueError('No pipelines for tag {}'.format(tag))
            tasks.update(self.registry[tag])

        # now that we have the tasks, figure out the order of tasks
        tree = self.build_tree(tasks)

        # Make the signatures, so we can call the tasks
        self.add_signatures_to_graph(
            tree,
            required_kwargs,
        )

        # Reduce the tree by dependencies to task chain(s)
        celery_tasks = self.get_task_to_run(tree)

        # Chain to the final task if needed
        final = self.get_end_task(tasks, required_kwargs)
        if final is not None:
            celery_tasks |= final
        return celery_tasks
Project: capillary    Author: celery-capillary    | project source | file source
def get_task_to_run(self, tree):
        """Working from the bottom up, replace each node with a chain to its
        descendant, or celery.Group of descendants.

        :param tree: Dependency graph of tasks
        :type tree: networkx.DiGraph

        :returns: chain to execute
        """

        # TODO: This could be more parallel
        return chain(*[
            maybe_signature(tree.node[name]['task'], self.celery_app)
            for name in nx.topological_sort(tree)
        ])
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def apply_flags_to_ip_address_scan(
        self,
        org_uuid=None,
        ip_address_uuid=None,
        ip_address_scan_uuid=None,
        order_uuid=None,
):
    """
    Apply all of the relevant flags to the data collected during the given IP address scan.
    :param org_uuid: The UUID of the organization that flags are being applied for.
    :param ip_address_uuid: The UUID of the IP address that was scanned.
    :param ip_address_scan_uuid: The UUID of the IP address scan.
    :return: None
    """
    logger.info(
        "Now applying flags to IP address scan %s."
        % (ip_address_scan_uuid,)
    )
    flags = get_all_ip_flags_for_organization(org_uuid=org_uuid, db_session=self.db_session)
    if len(flags) == 0:
        logger.info(
            "No IP address flags found for organization %s."
            % (org_uuid,)
        )
        return
    task_sigs = []
    for flag in flags:
        flag_type = "default" if isinstance(flag, DefaultFlag) else "organization"
        task_sigs.append(apply_flag_to_ip_address_scan.si(
            org_uuid=org_uuid,
            ip_address_uuid=ip_address_uuid,
            ip_address_scan_uuid=ip_address_scan_uuid,
            flag_uuid=flag.uuid,
            flag_type=flag_type,
            order_uuid=order_uuid,
        ))
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def zmap_scan_order(self, order_uuid=None):
    """
    Perform Zmap scans for all necessary ports for the given order.
    :param order_uuid: The UUID of the order to scan.
    :return: None
    """
    port_tuples = get_ports_to_scan_for_scan_config(
        config_uuid=self.scan_config.uuid,
        db_session=self.db_session,
    )
    logger.info(
        "Now scanning order %s for %s total ports."
        % (order_uuid, len(port_tuples))
    )
    task_signatures = []
    scan_signatures = []
    network_scan = create_network_scan_for_organization(
        db_session=self.db_session,
        org_uuid=self.org_uuid,
    )
    self.commit_session()
    for port, protocol in port_tuples:
        scan_signatures.append(zmap_scan_order_for_port.si(
            port=port,
            protocol=protocol,
            order_uuid=order_uuid,
            network_scan_uuid=network_scan.uuid,
        ))
    task_signatures.append(group(scan_signatures))
    task_signatures.append(update_zmap_scan_completed.si(
        scan_uuid=network_scan.uuid,
        org_uuid=self.org_uuid,
        order_uuid=order_uuid,
    ))
    logger.info("Kicking off Zmap subtasks now.")
    canvas_sig = chain(task_signatures)
    canvas_sig.apply_async()
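
One detail worth noting in zmap_scan_order above: the chain places a group first and a completion task after it. When Celery chains a group to a following task, it implicitly upgrades the pair to a chord, so update_zmap_scan_completed runs only after every zmap_scan_order_for_port task in the group has finished. A minimal hedged sketch of the same shape (task names are illustrative):

from celery import Celery, chain, group

app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def scan_port(port):
    return port

@app.task
def mark_completed():
    return 'done'

# A group followed by another task inside a chain becomes a chord:
# mark_completed fires only after both scan_port tasks finish.
chain(group(scan_port.si(22), scan_port.si(443)),
      mark_completed.si()).apply_async()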


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def discover_virtual_hosts_for_web_service(
        self,
        org_uuid=None,
        network_service_scan_uuid=None,
        network_service_uuid=None,
        use_ssl=None,
        order_uuid=None,
):
    """
    Discover all of the virtual hosts for the given web service.
    :param org_uuid: The organization to discover virtual hosts on behalf of.
    :param network_service_scan_uuid: The UUID of the network service scan that this virtual host discovery is
    a part of.
    :param network_service_uuid: The UUID of the network service where the web service resides.
    :param use_ssl: Whether or not to use SSL to interact with the remote web service.
    :return: None
    """
    logger.info(
        "Now discovering virtual hosts for network service %s. Organization is %s, scan is %s."
        % (network_service_uuid, org_uuid, network_service_scan_uuid)
    )
    task_sigs = []
    task_kwargs = {
        "org_uuid": org_uuid,
        "network_service_uuid": network_service_uuid,
        "network_service_scan_uuid": network_service_scan_uuid,
        "use_ssl": use_ssl,
        "order_uuid": order_uuid,
    }
    task_sigs.append(fingerprint_virtual_hosts.si(**task_kwargs))
    task_sigs.append(assess_virtual_host_fingerprints.si(**task_kwargs))
    logger.info(
        "Now kicking off virtual host fingerprinting and assessment tasks for scan %s, organization %s."
        % (network_service_scan_uuid, org_uuid)
    )
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def apply_flags_to_web_service_scan(
        self,
        org_uuid=None,
        web_service_uuid=None,
        web_service_scan_uuid=None,
        order_uuid=None,
):
    """
    Apply all of the relevant flags to the data collected during the given web service scan.
    :param org_uuid: The UUID of the organization that flags are being applied for.
    :param web_service_uuid: The UUID of the web service that was scanned.
    :param web_service_scan_uuid: The UUID of the web service scan to update data for.
    :return: None
    """
    logger.info(
        "Now applying flags to web service scan %s."
        % (web_service_scan_uuid,)
    )
    flags = get_all_web_flags_for_organization(db_session=self.db_session, org_uuid=org_uuid)
    if len(flags) == 0:
        logger.info(
            "No web flags found for organization %s."
            % (org_uuid,)
        )
        return
    task_sigs = []
    for flag in flags:
        flag_type = "default" if isinstance(flag, DefaultFlag) else "organization"
        task_sigs.append(apply_flag_to_web_service_scan.si(
            org_uuid=org_uuid,
            web_service_uuid=web_service_uuid,
            web_service_scan_uuid=web_service_scan_uuid,
            flag_uuid=flag.uuid,
            flag_type=flag_type,
            order_uuid=order_uuid,
        ))
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def inspect_service_application(
        self,
        org_uuid=None,
        network_service_scan_uuid=None,
        network_service_uuid=None,
        order_uuid=None,
):
    """
    Inspect the applications residing on the remote service.
    :param org_uuid: The UUID of the organization to inspect the service on behalf of.
    :param network_service_scan_uuid: The UUID of the NetworkScan that invoked this task.
    :param network_service_uuid: The UUID of the NetworkService to inspect.
    :return: None
    """
    logger.info(
        "Inspecting service %s for organization %s. Network scan was %s."
        % (network_service_uuid, org_uuid, network_service_scan_uuid)
    )
    task_signatures = []
    protocol = self.network_service.protocol.lower()
    if protocol == "tcp":
        task_signatures.append(inspect_tcp_service_application.si(
            org_uuid=org_uuid,
            network_service_scan_uuid=network_service_scan_uuid,
            network_service_uuid=network_service_uuid,
            order_uuid=order_uuid,
        ))
    else:
        raise UnsupportedProtocolError("No support for service inspection with protocol %s." % (protocol,))
    canvas_sig = chain(task_signatures)
    canvas_sig.apply_async()


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def redo_ssl_support_inspection_for_network_service_scan(self, network_service_scan_uuid=None):
    """
    Perform SSL support inspection for the given network service scan again.
    :param network_service_scan_uuid: The UUID of the network service scan to perform SSL support inspection
    for.
    :return: None
    """
    from .base import update_network_service_scan_elasticsearch
    logger.info(
        "Now redo'ing SSL support inspection for network service scan %s."
        % (network_service_scan_uuid,)
    )
    org_uuid, network_service_uuid = get_related_uuids_from_network_service_scan(
        network_service_scan_uuid=network_service_scan_uuid,
        db_session=self.db_session,
    )
    delete_ssl_inspection_documents_for_network_service_scan(
        org_uuid=org_uuid,
        network_service_scan_uuid=network_service_scan_uuid,
    )
    task_sigs = []
    task_sigs.append(inspect_tcp_service_for_ssl_support.si(
        org_uuid=org_uuid,
        network_service_uuid=network_service_uuid,
        network_service_scan_uuid=network_service_scan_uuid,
    ))
    task_sigs.append(update_network_service_scan_elasticsearch.si(
        network_service_scan_uuid=network_service_scan_uuid,
        org_uuid=org_uuid,
        network_service_uuid=network_service_uuid,
    ))
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def apply_flags_to_ssl_support_scan(
        self,
        org_uuid=None,
        network_service_uuid=None,
        network_service_scan_uuid=None,
        order_uuid=None,
):
    """
    Apply all of the necessary flags to the results of data gathered during the given SSL support scan.
    :param org_uuid: The UUID of the organization that owns the network service.
    :param network_service_uuid: The UUID of the network service that was scanned.
    :param network_service_scan_uuid: The UUID of the network service scan that this task is a part of.
    :return: None
    """
    logger.info(
        "Now applying flags to SSL support scan %s."
        % (network_service_uuid,)
    )
    flags = get_all_ssl_flags_for_organization(org_uuid=org_uuid, db_session=self.db_session)
    if len(flags) == 0:
        logger.info(
            "No SSL flags found for organization %s."
            % (org_uuid,)
        )
        return
    task_sigs = []
    for flag in flags:
        flag_type = "default" if isinstance(flag, DefaultFlag) else "organization"
        task_sigs.append(apply_flag_to_ssl_support_scan.si(
            org_uuid=org_uuid,
            network_service_uuid=network_service_uuid,
            network_service_scan_uuid=network_service_scan_uuid,
            flag_uuid=flag.uuid,
            flag_type=flag_type,
            order_uuid=order_uuid,
        ))
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: kostyor-openstack-ansible    Author: Mirantis    | project source | file source
def pre_upgrade(self):
        utilities = os.path.join(
            self._root, 'scripts', 'upgrade-utilities', 'playbooks')
        playbooks = os.path.join(self._root, 'playbooks')

        # According to the upgrade document, there are steps that must be
        # executed before trying to upgrade OpenStack to a new version. This
        # hook returns a chain of these steps so the operator doesn't need
        # to run them manually.
        #
        # http://docs.openstack.org/developer/openstack-ansible/upgrade-guide/manual-upgrade.html
        return celery.chain(*[

            # Bootstrapping Ansible again ensures that all OpenStack Ansible
            # role dependencies are in place before running playbooks of new
            # release.
            tasks.execute.si(
                os.path.join(self._root, 'scripts', 'bootstrap-ansible.sh'),
                cwd=self._root,
            ),

            # Some configuration may have changed, and old facts should be purged.
            self._run_playbook.si(
                os.path.join(utilities, 'ansible_fact_cleanup.yml'),
            ),

            # The user configuration files in /etc/openstack_deploy/ and
            # the environment layout in /etc/openstack_deploy/env.d may
            # have new name values added in the new release.
            self._run_playbook.si(
                os.path.join(utilities, 'deploy-config-changes.yml'),
            ),

            # Populate user_secrets.yml with new secrets added in the
            # new release.
            self._run_playbook.si(
                os.path.join(utilities, 'user-secrets-adjustment.yml'),
            ),

            # The presence of a pip.conf file can cause build failures when
            # upgrading, so it's better to remove it everywhere.
            self._run_playbook.si(
                os.path.join(utilities, 'pip-conf-removal.yml'),
            ),

            # Update the configuration of the repo servers and build the new
            # packages required by the new release.
            self._run_playbook.si(
                os.path.join(playbooks, 'repo-install.yml'),
                cwd=playbooks,
            ),
        ])
Project: capillary    Author: celery-capillary    | project source | file source
def build_tree(self, tasks):
        """Accepts any number of tasks as returned by _get_pipeline.

        :param tasks: dictionary of str:info where str is the name of the task, info is from the registry

        :returns: Graph containing each node connected by dependencies; "after: ALL" nodes are ignored
        :rtype: networkx.DiGraph

        :raises: :exc:`DependencyError`
        """
        all_task_names = [task for tasks_subset in self.registry.values() for task in tasks_subset]

        # Find dependencies - directed graph of node names
        tree = nx.DiGraph()

        # Add nodes
        for name, info in tasks.items():
            # TODO: doing this twice sucks
            if info['after'] is ALL:
                # ignore these
                continue
            tree.add_node(name, info=info)

        # Add edges
        for name, info in tasks.items():
            if info['after'] is ALL:
                # ignore these
                continue
            for req in info['after']:
                if req not in all_task_names:
                    msg = '"{}" pipeline element was not found, but it is declared as dependency of the pipeline "{}" with arguments "{}"'
                    raise DependencyError(msg.format(req, name, info))
                if req in tree:  # don't add an edge if dependency is not part of the current set of tasks
                    tree.add_edge(req, name)

        # Not as useful as it originally seemed
        # tree = prune_edges(tree)

        # Check for circular dependencies
        try:
            cycle = next(nx.simple_cycles(tree))
            raise DependencyError('Circular dependencies detected: {}'.format(cycle))
        except StopIteration:
            # Good - didn't want any cycles
            pass

        # Joins (merge multiple tasks) have more than one edge in
        # joins = [n for n, d in tree.in_degree_iter() if d > 1]

        # Don't support joins right now, one reducer at the end of the chain
        # if joins:
        #    raise DependencyError('Multiple after values not currently supported joins="{}"'.format(joins))

        # TODO - even with joins this could be a challenge
        # Can't handle "N" shapes, so check for those
        # Descendants of forks, cannot join from outside.
        return tree
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def enumerate_subdomains_for_domain(
        self,
        org_uuid=None,
        domain_uuid=None,
        domain_name=None,
        domain_scan_uuid=None,
        order_uuid=None,
):
    """
    Enumerate subdomains for the given domain name and associate the results with the given domain UUID.
    :param org_uuid: The UUID of the organization to perform the task for.
    :param domain_uuid: The UUID of the parent domain that this subdomain scan is invoked on behalf of.
    :param domain_name: The domain name to enumerate subdomains for.
    :param order_uuid: The UUID of the order that this enumeration is associated with.
    :return: None
    """
    logger.info(
        "Now enumerating subdomains for domain name %s (parent domain %s)."
        % (domain_name, domain_uuid)
    )
    try:
        parent_domain = get_parent_domain_for_subdomain_discovery(domain_name)
    except UnsupportedTldException:
        logger.warning(
            "The domain %s contains a TLD that we do not support."
            % (domain_name,)
        )
        return
    task_sigs = []
    discovery_sigs = []
    task_kwargs = {
        "org_uuid": org_uuid,
        "domain_uuid": domain_uuid,
        "domain_scan_uuid": domain_scan_uuid,
        "parent_domain": parent_domain,
        "order_uuid": order_uuid,
    }
    discovery_sigs.append(enumerate_subdomains_by_dnsdb.si(**task_kwargs))
    task_sigs.append(group(discovery_sigs))
    task_sigs.append(create_and_inspect_domains_from_subdomain_enumeration.si(**task_kwargs))
    canvas_sig = chain(task_sigs)
    self.finish_after(signature=canvas_sig)


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def inspect_http_service(
        self,
        org_uuid=None,
        network_service_scan_uuid=None,
        network_service_uuid=None,
        order_uuid=None,
):
    """
    Inspect the HTTP service running on the given network service on behalf of the given
    organization and network service scan.
    :param org_uuid: The UUID of the organization to inspect the HTTP service on behalf of.
    :param network_service_scan_uuid: The UUID of the network service scan.
    :param network_service_uuid: The UUID of the network service.
    :return: None
    """
    logger.info(
        "Now inspecting HTTP service residing on network service %s. Organization is %s."
        % (network_service_uuid, org_uuid)
    )
    scan_config = self.scan_config
    if scan_config.web_app_enum_vhosts:
        task_sigs = []
        task_kwargs = {
            "org_uuid": org_uuid,
            "network_service_scan_uuid": network_service_scan_uuid,
            "network_service_uuid": network_service_uuid,
            "use_ssl": False,
            "order_uuid": order_uuid,
        }
        task_sigs.append(discover_virtual_hosts_for_web_service.si(**task_kwargs))
        task_sigs.append(inspect_virtual_hosts_for_network_service.si(**task_kwargs))
        logger.info(
            "Now kicking off %s tasks to inspect HTTP service at %s. Organization is %s."
            % (len(task_sigs), network_service_uuid, org_uuid)
        )
        canvas_sig = chain(task_sigs)
        self.finish_after(signature=canvas_sig)
    else:
        populate_and_scan_web_services_from_network_service_scan.si(
            org_uuid=org_uuid,
            network_service_scan_uuid=network_service_scan_uuid,
            network_service_uuid=network_service_uuid,
            use_ssl=False,
            order_uuid=order_uuid,
        ).apply_async()


#USED
Project: ws-backend-community    Author: lavalamp-    | project source | file source
def inspect_tcp_service_for_ssl_support(
        self,
        org_uuid=None,
        network_service_uuid=None,
        network_service_scan_uuid=None,
        order_uuid=None,
):
    """
    Collect all possible information about SSL support found on the referenced network service.
    :param org_uuid: The UUID of the organization to collect information on behalf of.
    :param network_service_uuid: The UUID of the network service to check for SSL support.
    :param network_service_scan_uuid: The UUID of the network service scan that this SSL support check is associated
    with.
    :return: None
    """
    ip_address = self.network_service.ip_address.address
    port = self.network_service.port
    logger.info(
        "Now inspecting TCP service at %s:%s for SSL data for organization %s. Scan is %s."
        % (ip_address, port, org_uuid, network_service_scan_uuid)
    )
    initial_check = self.inspector.check_ssl_support()
    if not initial_check:
        logger.info(
            "Service at %s:%s does not support any version of SSL."
            % (ip_address, port)
        )
        return
    logger.info(
        "Service at %s:%s supports SSL. Now kicking off subtasks to check for various version support."
        % (ip_address, port)
    )
    task_sigs = []
    task_kwargs = {
        "org_uuid": org_uuid,
        "network_service_uuid": network_service_uuid,
        "network_service_scan_uuid": network_service_scan_uuid,
        "order_uuid": order_uuid,
    }
    collection_sigs = []
    scan_config = self.scan_config
    if scan_config.ssl_enumerate_vulns:
        collection_sigs.append(enumerate_vulnerabilities_for_ssl_service.si(**task_kwargs))
    if scan_config.ssl_enumerate_cipher_suites:
        collection_sigs.append(enumerate_cipher_suites_for_ssl_service.si(**task_kwargs))
    if scan_config.ssl_retrieve_cert:
        collection_sigs.append(retrieve_ssl_certificate_for_tcp_service.si(**task_kwargs))
    task_sigs.append(group(collection_sigs))
    task_sigs.append(create_ssl_support_report_for_network_service_scan.si(**task_kwargs))
    task_sigs.append(apply_flags_to_ssl_support_scan.si(**task_kwargs))
    if config.pubsub_enabled:
        task_sigs.append(publish_report_for_ssl_support_scan.si(**task_kwargs))
    canvas_sig = chain(task_sigs)
    logger.info(
        "Now kicking off %s tasks to inspect SSL support for network service %s."
        % (len(collection_sigs) + 1, network_service_uuid)
    )
    self.finish_after(signature=canvas_sig)


#USED