Python twisted.internet.defer module: CancelledError() example source code

The following 49 code examples, extracted from open-source Python projects, illustrate how to use twisted.internet.defer.CancelledError().

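Before the per-project examples, here is a minimal standalone sketch (not taken from any of the projects below) of where CancelledError comes from: cancelling a Deferred that has no canceller, or whose canceller does not fire it, errbacks the Deferred with a CancelledError failure.

from twisted.internet import defer

def on_cancel(failure):
    # Only swallow cancellation; any other failure keeps propagating.
    failure.trap(defer.CancelledError)
    print("request was cancelled")

d = defer.Deferred()   # no canceller supplied
d.addErrback(on_cancel)
d.cancel()             # errbacks d with defer.CancelledError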
Project: bwscanner | Author: TheTorProject
def fetch(path, url, state):
    agent = OnionRoutedAgent(reactor, path=path, state=state)
    request = agent.request("GET", url)
    reactor.callLater(10, request.cancel)
    request.addCallback(readBody)

    def parse_ip(body):
        exit_ip = path[-1].ip
        try:
            checked_ip = re.search("<strong>(.*)</strong>", body).group(1)
            return exit_ip, checked_ip
        except AttributeError:
            return exit_ip, None

    request.addCallback(parse_ip)

    def err(failure):
        failure.trap(defer.CancelledError, ResponseNeverReceived,
                     ResponseFailed, HostUnreachable, TTLExpired)
        log.err(failure)
    request.addErrback(err)
    return request
Project: zenchmarks | Author: squeaky-pl
def test_clientConnectionFailed(self):
        """
        When a client connection fails, the service removes its reference
        to the protocol and tries again after a timeout.
        """
        clock = Clock()
        cq, service = self.makeReconnector(fireImmediately=False,
                                           clock=clock)
        self.assertEqual(len(cq.connectQueue), 1)
        cq.connectQueue[0].errback(Failure(Exception()))
        whenConnected = service.whenConnected()
        self.assertNoResult(whenConnected)
        # Don't fail during test tear-down when service shutdown causes all
        # waiting connections to fail.
        whenConnected.addErrback(lambda ignored: ignored.trap(CancelledError))
        clock.advance(AT_LEAST_ONE_ATTEMPT)
        self.assertEqual(len(cq.connectQueue), 2)
Project: zenchmarks | Author: squeaky-pl
def test_whenConnectedErrbacksOnStopService(self):
        """
        L{ClientService.whenConnected} returns a L{Deferred} that
        errbacks with L{CancelledError} if
        L{ClientService.stopService} is called between connection
        attempts.
        """
        clock = Clock()
        cq, service = self.makeReconnector(fireImmediately=False,
                                           clock=clock)
        beforeErrbackAndStop = service.whenConnected()

        # The protocol fails to connect, and the service is waiting to
        # reconnect.
        cq.connectQueue[0].errback(Exception("no connection"))

        service.stopService()
        afterErrbackAndStop = service.whenConnected()

        self.assertIsInstance(self.failureResultOf(beforeErrbackAndStop).value,
                              CancelledError)
        self.assertIsInstance(self.failureResultOf(afterErrbackAndStop).value,
                              CancelledError)
Project: zenchmarks | Author: squeaky-pl
def test_execCancelled(self):
        """
        If execution of the command is cancelled via the L{Deferred} returned
        by L{SSHCommandClientEndpoint.connect}, the connection is closed
        immediately.
        """
        self.realm.channelLookup[b'session'] = UnsatisfiedExecSession
        endpoint = self.create()

        factory = Factory()
        factory.protocol = Protocol
        connected = endpoint.connect(factory)
        server, client, pump = self.finishConnection()

        connected.cancel()

        f = self.failureResultOf(connected)
        f.trap(CancelledError)

        self.assertClientTransportState(client, True)
Project: zenchmarks | Author: squeaky-pl
def test_cancelGetConnectionCancelsEndpointConnect(self):
        """
        Cancelling the C{Deferred} returned from
        L{HTTPConnectionPool.getConnection} cancels the C{Deferred} returned
        by opening a new connection with the given endpoint.
        """
        self.assertEqual(self.pool._connections, {})
        connectionResult = Deferred()

        class Endpoint:
            def connect(self, factory):
                return connectionResult

        d = self.pool.getConnection(12345, Endpoint())
        d.cancel()
        self.assertEqual(self.failureResultOf(connectionResult).type,
                         CancelledError)
Project: zenchmarks | Author: squeaky-pl
def test_deprecatedTransport(self):
        """
        Calling L{client.readBody} with a transport that does not implement
        L{twisted.internet.interfaces.ITCPTransport} produces a deprecation
        warning, but no exception when cancelling.
        """
        response = DummyResponse(transportFactory=StringTransport)
        response.transport.abortConnection = None
        d = self.assertWarns(
            DeprecationWarning,
            'Using readBody with a transport that does not have an '
            'abortConnection method',
            __file__,
            lambda: client.readBody(response))
        d.cancel()
        self.failureResultOf(d, defer.CancelledError)
Project: zenchmarks | Author: squeaky-pl
def _cancelConnectTest(self, connect):
        """
        Helper for implementing a test to verify that cancellation of the
        L{Deferred} returned by one of L{ClientCreator}'s I{connect} methods is
        implemented to cancel the underlying connector.

        @param connect: A function which will be invoked with a L{ClientCreator}
            instance as an argument and which should call one of its I{connect}
            methods and return the result.

        @return: A L{Deferred} which fires when the test is complete or fails if
            there is a problem.
        """
        reactor = MemoryReactorClock()
        cc = ClientCreator(reactor, Protocol)
        d = connect(cc)
        connector = reactor.connectors.pop()
        self.assertFalse(connector._disconnected)
        d.cancel()
        self.assertTrue(connector._disconnected)
        return self.assertFailure(d, CancelledError)
Project: zenchmarks | Author: squeaky-pl
def test_cancelConnectTCPTimeout(self):
        """
        L{ClientCreator.connectTCP} inserts a very short delayed call between
        the time the connection is established and the time the L{Deferred}
        returned from one of its connect methods actually fires.  If the
        L{Deferred} is cancelled in this interval, the established connection is
        closed, the timeout is cancelled, and the L{Deferred} fails with
        L{CancelledError}.
        """
        def connect(reactor, cc):
            d = cc.connectTCP('example.com', 1234)
            host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
            protocol = factory.buildProtocol(None)
            transport = StringTransport()
            protocol.makeConnection(transport)
            return d
        return self._cancelConnectTimeoutTest(connect)
Project: zenchmarks | Author: squeaky-pl
def test_cancelConnectUNIXTimeout(self):
        """
        L{ClientCreator.connectUNIX} inserts a very short delayed call between
        the time the connection is established and the time the L{Deferred}
        returned from one of its connect methods actually fires.  If the
        L{Deferred} is cancelled in this interval, the established connection is
        closed, the timeout is cancelled, and the L{Deferred} fails with
        L{CancelledError}.
        """
        def connect(reactor, cc):
            d = cc.connectUNIX('/foo/bar')
            address, factory, timeout, bindAddress = reactor.unixClients.pop()
            protocol = factory.buildProtocol(None)
            transport = StringTransport()
            protocol.makeConnection(transport)
            return d
        return self._cancelConnectTimeoutTest(connect)
Project: zenchmarks | Author: squeaky-pl
def test_cancelConnectSSLTimeout(self):
        """
        L{ClientCreator.connectSSL} inserts a very short delayed call between
        the time the connection is established and the time the L{Deferred}
        returned from one of its connect methods actually fires.  If the
        L{Deferred} is cancelled in this interval, the established connection is
        closed, the timeout is cancelled, and the L{Deferred} fails with
        L{CancelledError}.
        """
        def connect(reactor, cc):
            d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
            protocol = factory.buildProtocol(None)
            transport = StringTransport()
            protocol.makeConnection(transport)
            return d
        return self._cancelConnectTimeoutTest(connect)
Project: zenchmarks | Author: squeaky-pl
def _cancelConnectFailedTimeoutTest(self, connect):
        """
        Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
        cancelled after the connection attempt has failed but before it is fired
        with the resulting failure.
        """
        reactor = MemoryReactorClock()
        cc = ClientCreator(reactor, Protocol)
        d, factory = connect(reactor, cc)
        connector = reactor.connectors.pop()
        factory.clientConnectionFailed(
            connector, Failure(Exception("Simulated failure")))

        # Sanity check - there is an outstanding delayed call to fire the
        # Deferred.
        self.assertEqual(len(reactor.getDelayedCalls()), 1)

        # Cancel the Deferred, cancelling the delayed call.
        d.cancel()

        self.assertEqual(reactor.getDelayedCalls(), [])

        return self.assertFailure(d, CancelledError)
Project: zenchmarks | Author: squeaky-pl
def test_cancel(self):
        """
        The L{Deferred} returned by L{task.deferLater} can be
        cancelled to prevent the call from actually being performed.
        """
        called = []
        clock = task.Clock()
        d = task.deferLater(clock, 1, called.append, None)
        d.cancel()
        def cbCancelled(ignored):
            # Make sure there are no calls outstanding.
            self.assertEqual([], clock.getDelayedCalls())
            # And make sure the call didn't somehow happen already.
            self.assertFalse(called)
        self.assertFailure(d, defer.CancelledError)
        d.addCallback(cbCancelled)
        return d
Project: zenchmarks | Author: squeaky-pl
def test_cancelDeferredListCallback(self):
        """
        When cancelling an unfired L{defer.DeferredList} without the
        C{fireOnOneCallback} and C{fireOnOneErrback} flags set, the
        L{defer.DeferredList} will callback with a C{list} of
        (success, result) C{tuple}s.
        """
        deferredOne = defer.Deferred(fakeCallbackCanceller)
        deferredTwo = defer.Deferred()
        deferredList = defer.DeferredList([deferredOne, deferredTwo])
        deferredList.cancel()
        self.failureResultOf(deferredTwo, defer.CancelledError)
        result = self.successResultOf(deferredList)
        self.assertTrue(result[0][0])
        self.assertEqual(result[0][1], "Callback Result")
        self.assertFalse(result[1][0])
        self.assertTrue(result[1][1].check(defer.CancelledError))
Project: zenchmarks | Author: squeaky-pl
def test_cancelDeferredListWithFireOnOneErrback(self):
        """
        When cancelling an unfired L{defer.DeferredList} with the flag
        C{fireOnOneErrback} set, cancel every L{defer.Deferred} in the list.
        """
        deferredOne = defer.Deferred()
        deferredTwo = defer.Deferred()
        deferredList = defer.DeferredList([deferredOne, deferredTwo],
                                          fireOnOneErrback=True)
        deferredList.cancel()
        self.failureResultOf(deferredOne, defer.CancelledError)
        self.failureResultOf(deferredTwo, defer.CancelledError)
        deferredListFailure = self.failureResultOf(deferredList,
                                                   defer.FirstError)
        firstError = deferredListFailure.value
        self.assertTrue(firstError.subFailure.check(defer.CancelledError))
Project: zenchmarks | Author: squeaky-pl
def test_noCancellerMultipleCancelsAfterCancelAndErrback(self):
        """
        A L{defer.Deferred} without a canceller, when cancelled and then
        errbacked, ignores multiple cancels thereafter.
        """
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        d.cancel()
        self.assertEqual(self.errbackResults.type, defer.CancelledError)
        currentFailure = self.errbackResults
        # One errback will be ignored
        d.errback(GenericError())
        # I.e., we should still have a CancelledError.
        self.assertEqual(self.errbackResults.type, defer.CancelledError)
        d.cancel()
        self.assertIs(currentFailure, self.errbackResults)
Project: zenchmarks | Author: squeaky-pl
def test_cancellerMultipleCancel(self):
        """
        Verify that calling cancel multiple times on a deferred with a
        canceller that does not errback results in a
        L{defer.CancelledError}, that subsequent calls to cancel do not
        cause an error, and that the canceller is only called once.
        """
        def cancel(d):
            self.cancellerCallCount += 1

        d = defer.Deferred(canceller=cancel)
        d.addCallbacks(self._callback, self._errback)
        d.cancel()
        self.assertEqual(self.errbackResults.type, defer.CancelledError)
        currentFailure = self.errbackResults
        d.cancel()
        self.assertIs(currentFailure, self.errbackResults)
        self.assertEqual(self.cancellerCallCount, 1)
Project: zenchmarks | Author: squeaky-pl
def test_cancelNestedDeferred(self):
        """
        Verify that a Deferred, a, which is waiting on another Deferred, b,
        returned from one of its callbacks, will propagate
        L{defer.CancelledError} when a is cancelled.
        """
        def innerCancel(d):
            self.cancellerCallCount += 1
        def cancel(d):
            self.assertTrue(False)

        b = defer.Deferred(canceller=innerCancel)
        a = defer.Deferred(canceller=cancel)
        a.callback(None)
        a.addCallback(lambda data: b)
        a.cancel()
        a.addCallbacks(self._callback, self._errback)
        # The cancel count should be one (the cancellation done by B)
        self.assertEqual(self.cancellerCallCount, 1)
        # B's canceller didn't errback, so defer.py will have called errback
        # with a CancelledError.
        self.assertEqual(self.errbackResults.type, defer.CancelledError)
Project: zenchmarks | Author: squeaky-pl
def test_errbackAddedBeforeTimeout(self):
        """
        An errback added before a timeout is added errbacks with a
        L{defer.CancelledError} when the timeout fires.  If the
        errback returns the L{defer.CancelledError}, it is translated
        to a L{defer.TimeoutError} by the timeout implementation.
        """
        clock = Clock()
        d = defer.Deferred()

        dErrbacked = [None]

        def errback(f):
            dErrbacked[0] = f
            return f

        d.addErrback(errback)
        d.addTimeout(10, clock)

        clock.advance(15)

        self.assertIsInstance(dErrbacked[0], failure.Failure)
        self.assertIsInstance(dErrbacked[0].value, defer.CancelledError)

        self.failureResultOf(d, defer.TimeoutError)
Project: zenchmarks | Author: squeaky-pl
def test_errbackAddedBeforeTimeoutCustom(self):
        """
        An errback added before a timeout is added with a custom
        timeout function errbacks with a L{defer.CancelledError} when
        the timeout fires.  The timeout function runs if the errback
        returns the L{defer.CancelledError}.
        """
        clock = Clock()
        d = defer.Deferred()

        dErrbacked = [None]

        def errback(f):
            dErrbacked[0] = f
            return f

        d.addErrback(errback)
        d.addTimeout(10, clock, _overrideFunc)

        clock.advance(15)

        self.assertIsInstance(dErrbacked[0], failure.Failure)
        self.assertIsInstance(dErrbacked[0].value, defer.CancelledError)

        self.assertEqual("OVERRIDDEN", self.successResultOf(d))
Project: zenchmarks | Author: squeaky-pl
def test_errbackAddedBeforeTimeoutSuppressesCancellationCustom(self):
        """
        An errback added before a timeout is added with a custom
        timeout function errbacks with a L{defer.CancelledError} when
        the timeout fires.  The timeout function runs if the errback
        suppresses the L{defer.CancelledError}.
        """
        clock = Clock()
        d = defer.Deferred()

        dErrbacked = [None]

        def errback(f):
            dErrbacked[0] = f

        d.addErrback(errback)
        d.addTimeout(10, clock, _overrideFunc)

        clock.advance(15)

        self.assertIsInstance(dErrbacked[0], failure.Failure)
        self.assertIsInstance(dErrbacked[0].value, defer.CancelledError)

        self.assertEqual("OVERRIDDEN", self.successResultOf(d))
Project: zenchmarks | Author: squeaky-pl
def test_routeHandlesRequestFinished(self):
        app = self.app
        request = requestMock(b"/")

        cancelled = []

        @app.route("/")
        def root(request):
            _d = Deferred()
            _d.addErrback(cancelled.append)
            request.notifyFinish().addCallback(lambda _: _d.cancel())
            return _d

        d = _render(self.kr, request)

        request.finish()

        self.assertFired(d)

        cancelled[0].trap(CancelledError)
        self.assertEqual(request.getWrittenData(), b'')
        self.assertEqual(request.writeCount, 1)
        self.assertEqual(request.processingFailed.call_count, 0)
Project: zenchmarks | Author: squeaky-pl
def test_cancelsOnConnectionLost(self):
        app = self.app
        request = requestMock(b"/")

        handler_d = Deferred()

        @app.route("/")
        def root(request):
            return handler_d

        d = _render(self.kr, request)

        self.assertNotFired(d)

        request.connectionLost(ConnectionLost())

        handler_d.addErrback(lambda f: f.trap(CancelledError))

        d.addErrback(lambda f: f.trap(ConnectionLost))
        d.addCallback(lambda _: handler_d)
        self.assertFired(d)
Project: txamqp | Author: txamqp
@defer.inlineCallbacks
def close(self, reason=None, within=0):
        """Explicitly close the connection.

        @param reason: Optional closing reason. If not given, ConnectionDone
            will be used.
        @param within: Shut down the client within this number of seconds. If
            zero (the default), all channels and queues will be closed
            immediately. If greater than 0, try to close the AMQP connection
            cleanly, by sending a "close" method and waiting for "close-ok". If
            no reply is received within the given number of seconds, the
            transport will be forcibly shut down.
        """
        if self.closed:
            return

        if reason is None:
            reason = ConnectionDone()

        if within > 0:
            channel0 = yield self.channel(0)
            deferred = channel0.connection_close()
            call = self.clock.callLater(within, deferred.cancel)
            try:
                yield deferred
            except defer.CancelledError:
                pass
            else:
                call.cancel()

        self.do_close(reason)
Project: p2pool-bch | Author: amarian12
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
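
The errback above exists so that a later stop() can cancel the in-flight worker Deferred without an unhandled CancelledError being logged. A hypothetical companion method (not part of this snippet) would look roughly like this:

def stop(self):
    assert self.running
    self.running = False
    # The errback added in start() traps the CancelledError raised by this cancel.
    self._df.cancel()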
Project: txaio-etcd | Author: crossbario
def watch(self, keys, on_watch, filters=None, start_revision=None, return_previous=None):
        """
        Watch one or more keys or key sets and invoke a callback.

        Watch watches for events happening or that have happened. The entire event history
        can be watched starting from the last compaction revision.

        :param keys: Watch these keys / key sets.
        :type keys: list of bytes or list of instance of :class:`txaioetcd.KeySet`

        :param on_watch: The callback to invoke upon receiving
            a watch event.
        :type on_watch: callable

        :param start_revision: start_revision is an optional
            revision to watch from (inclusive). No start_revision is "now".
        :type start_revision: int
        """
        d = self._start_watching(keys, on_watch, filters, start_revision, return_previous)

        def on_err(err):
            if err.type == CancelledError:
                # swallow canceling!
                pass
            else:
                return err

        d.addErrback(on_err)

        return d
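
A hypothetical usage sketch for the method above; the Client constructor, the txaioetcd import path, and the KeySet usage are assumptions and not part of this snippet. Because on_err swallows CancelledError, cancelling the Deferred returned by watch() is the intended way to stop watching and does not surface an error.

from twisted.internet import reactor
from txaioetcd import Client, KeySet  # assumed import path

def on_watch(event):
    print("watch event:", event)

etcd = Client(reactor, u'http://localhost:2379')  # assumed constructor
d = etcd.watch([KeySet(b'mykey', prefix=True)], on_watch)
# Stop watching after 60 seconds; the resulting CancelledError is swallowed
# by the on_err errback inside watch().
reactor.callLater(60, d.cancel)
reactor.run()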
Project: p2pool-unitus | Author: amarian12
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
Project: flowder | Author: amir-khakshour
def failed(self, failure, job_id):
        if failure.check(CancelledError):
            self.job_failed("Response max size exceeded! job id: %s!" % job_id, job_id)

        elif failure.check(InvalidResponseRetry):
            ex = failure.value
            if job_id in self.retry_counter and self.retry_counter[job_id] == self.max_retry:
                self.job_failed("Max retry has been reached! job id: %s!" % job_id, job_id)
            else:
                self.job_failed_retry(ex.message, job_id)

        elif failure.check(ResponseNeverReceived):
            self.job_failed("No response from the server! job id: %s!" % job_id, job_id)

        elif failure.check(ResponseFailed):
            # @TODO add retry
            self.job_failed("Connection to server failed, retry .... %s!" % job_id, job_id)

        elif failure.check(NoResponseContent):
            self.job_failed("Response has no content .... %s!" % job_id, job_id)

        elif failure.check(TimeoutError):
            if job_id in self.retry_counter and self.retry_counter[job_id] == self.max_retry:
                self.job_failed("Max retry has been reached! job id: %s!" % job_id, job_id)
            else:
                self.job_failed_retry("Request timeout .... %s!" % job_id, job_id)
        elif failure.check(ConnectionRefusedError):
            if job_id in self.retry_counter and self.retry_counter[job_id] == self.max_retry:
                self.job_failed("Max retry has been reached! job id: %s!" % job_id, job_id)
            else:
                self.job_failed_retry("Connection refused .... %s!" % job_id, job_id)

        else:
            ex = failure.value
            self.job_failed("No proper failure found: %s, \n %s!" % (job_id, ex.message), job_id)
            failure.printTraceback()
Project: p2pool-dgb-sha256 | Author: ilsawa
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
Project: p2pool-ltc | Author: ilsawa
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
Project: p2pool-bsty | Author: amarian12
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
Project: fluiddb | Author: fluidinfo
def _logInternalError(request, exception, fail, resourceClass):
    """
    Log a FluidDB internal error. Apart from doing the obvious things, we
    also pull all relevant tags off any Thrift error (our internal
    errors tend to come from calls we've made to the facade service via
    Thrift) and log them too.
    """
    log.msg('Request %s: Exception calling %r#deferred_render_%r ' %
            (request._fluidDB_reqid, resourceClass.__class__.__name__,
             request.method))
    log.msg(exception)
    traceback = fail.getTraceback()

    # If we get a CancelledError, we only log it as a warning; this is not a
    # severe error and it causes too much noise in the log files.
    if fail.check(CancelledError):
        logging.warning(traceback)
    else:
        logging.error(traceback)

    tags = thriftExceptions.get(exception.__class__)
    if tags:
        msg = []
        for tag in tags:
            msg.append('Failure tag %r: %r' %
                       (tag, getattr(exception, tag)))
        if msg:
            log.msg('\n'.join(msg))
Project: fluiddb | Author: fluidinfo
def testDelayedDisconnectDoesNotFinishRequest(self):
        """
        A C{CancelledError} exception is raised if content cannot be read
        from the request midway through processing, due to the client
        disconnecting.  In such cases, the C{Request.finish} method is not
        invoked by the L{handleRequestError} handler to avoid causing a
        failure in Twisted.
        """
        failure = Failure(CancelledError("Client disconnected partway."))
        handleRequestError(failure, self.request, self.resource)
        self.assertFalse(self.request.finished)
Project: p2pool-cann | Author: ilsawa
def start(self, period):
        assert not self.running
        self.running = True
        self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
Project: crondeamon | Author: zhoukunpeng504
@classmethod
@defer.inlineCallbacks
def _init(cls, tid, initcode=True):
        try:
            tid = int(tid)
            result = yield run_conn_fun(
                "runQuery",
                "select ip,svnpath,svnuser,svnpasswd,version,rule from cron_task WHERE tid=%s",
                (tid,))
            ip, svnpath, svnuser, svnpasswd, svnversion, rule = result[0]
            if initcode:
                _defer = SubRpc().xmlrpc_init(int(tid), svnpath, int(svnversion), svnuser, svnpasswd)
                set_time_out(2, _defer)
                try:
                    yield _defer
                except defer.CancelledError:
                    # The init RPC timed out and was cancelled; carry on anyway.
                    pass
            if tid in cls.BUFF and cls.BUFF[tid].running:
                cls.BUFF[tid].stop()
            schedule = CronSchedule(rule)
            sc = ScheduledCall(cls._run, tid)
            sc.start(schedule)
            cls.BUFF[tid] = sc
            defer.returnValue(True)
        except Exception as e:
            defer.returnValue((False, str(e)))
Project: crondeamon | Author: zhoukunpeng504
@classmethod
@defer.inlineCallbacks
def _init(cls, tid, initcode=True):
        try:
            tid = int(tid)
            result = yield run_conn_fun(
                "runQuery",
                "select ip,svnpath,svnuser,svnpasswd,version from task_task WHERE tid=%s",
                (tid,))
            ip, svnpath, svnuser, svnpasswd, svnversion = result[0]
            if initcode:
                _defer = SubRpc().xmlrpc_init(tid, svnpath, svnversion, svnuser, svnpasswd, mode="task")
                set_time_out(2, _defer)
                try:
                    yield _defer
                except defer.CancelledError:
                    # The init RPC timed out and was cancelled; carry on anyway.
                    pass
            if tid in cls.BUFF:
                if cls.BUFF[tid].running:
                    cls.BUFF[tid].stop()
                del cls.BUFF[tid]
            _task = task.LoopingCall(cls._check, tid)
            _task.start(60, now=False)  # poll every 60 seconds, not immediately
            yield cls._check(tid)
            cls.BUFF[tid] = _task
            defer.returnValue(True)
        except Exception as e:
            defer.returnValue((False, str(e)))
Project: iotdm-pyclient | Author: peterchauyw
def print_error(self, failure, card):
        r = failure.trap(InvalidURI, FragmentNotAllowed, socket.gaierror, socket.error, error.RequestTimedOut, defer.CancelledError)
        if r == InvalidURI:
            log.msg("Error: invalid URI")
            card.response_payload.text = "Error: Invalid URI!"
        elif r == FragmentNotAllowed:
            log.msg("Error: fragment found")
            card.response_payload.text = "Error: URI fragment not allowed for CoAP!"
        elif r == socket.gaierror or r == socket.error:
            log.msg("Error: hostname not found")
            card.response_payload.text = "Error: hostname not found!"
        elif r == error.RequestTimedOut:
            log.msg("Error: request timed out")
            card.response_payload.text = 'Error: request timed out!'
Project: afkak | Author: ciena
def _handle_offset_error(self, failure):
        """
        Retry the offset fetch request if appropriate.

        Once the :attr:`.retry_delay` reaches our :attr:`.retry_max_delay`, we
        log a warning.  This should perhaps be extended to abort sooner on
        certain errors.
        """
        # outstanding request got errback'd, clear it
        self._request_d = None

        if self._stopping and failure.check(CancelledError):
            # Not really an error
            return
        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                self._fetch_attempt_count >= self.request_retry_max_attempts):
            log.debug(
                "%r: Exhausted attempts: %d fetching offset from kafka: %r",
                self, self.request_retry_max_attempts, failure)
            self._start_d.errback(failure)
            return
        # Decide how to log this failure: if we have retried so many times that
        # we're at the retry_max_delay, log at warning every other time, and at
        # debug otherwise.
        if (self.retry_delay < self.retry_max_delay or
                0 == (self._fetch_attempt_count % 2)):
            log.debug("%r: Failure fetching offset from kafka: %r", self,
                      failure)
        else:
            # We've retried until we hit the max delay, log at warn
            log.warning("%r: Still failing fetching offset from kafka: %r",
                        self, failure)
        self._retry_fetch()
Project: afkak | Author: ciena
def _handle_commit_error(self, failure, retry_delay, attempt):
        """ Retry the commit request, depending on failure type

        Depending on the type of the failure, we retry the commit request
        with the latest processed offset, or callback/errback self._commit_ds
        """
        # Check if we are stopping and the request was cancelled
        if self._stopping and failure.check(CancelledError):
            # Not really an error
            return self._deliver_commit_result(self._last_committed_offset)

        # Check that the failure type is a Kafka error...this could maybe be
        # a tighter check to determine whether a retry will succeed...
        if not failure.check(KafkaError):
            log.error("Unhandleable failure during commit attempt: %r\n\t%r",
                      failure, failure.getBriefTraceback())
            return self._deliver_commit_result(failure)

        # Do we need to abort?
        if (self.request_retry_max_attempts != 0 and
                attempt >= self.request_retry_max_attempts):
            log.debug("%r: Exhausted attempts: %d to commit offset: %r",
                      self, self.request_retry_max_attempts, failure)
            return self._deliver_commit_result(failure)

        # Check the retry_delay to see if we should log at the higher level
        # Using attempts % 2 gets us one warning per minute with default timings
        if (retry_delay < self.retry_max_delay or 0 == (attempt % 2)):
            log.debug("%r: Failure committing offset to kafka: %r", self,
                      failure)
        else:
            # We've retried until we hit the max delay, log alternately at warn
            log.warning("%r: Still failing committing offset to kafka: %r",
                        self, failure)

        # Schedule a delayed call to retry the commit
        retry_delay = min(retry_delay * REQUEST_RETRY_FACTOR,
                          self.retry_max_delay)
        self._commit_call = self._get_clock().callLater(
            retry_delay, self._send_commit_request, retry_delay, attempt + 1)
Project: afkak | Author: ciena
def _handle_processor_error(self, failure):
        """Handle a failure in the processing of a block of messages

        This method is called when the processor func fails while processing
        a block of messages. Since we can't know how best to handle a
        processor failure, we just :func:`errback` our :func:`start` method's
        deferred to let our user know about the failure.
        """
        # Check if we're stopping/stopped and the errback of the processor
        # deferred is just the cancelling we initiated.  If so, we skip
        # notifying via the _start_d deferred, as it will be 'callback'd at the
        # end of stop()
        if not (self._stopping and failure.check(CancelledError)):
            if self._start_d:  # Make sure we're not already stopped
                self._start_d.errback(failure)
Project: afkak | Author: ciena
def test_consumer_stop_during_commit(self):
        # setup a client which will return a message block in response to fetch
        # and just fail on the commit
        mockclient = Mock()
        mockclient.send_offset_commit_request.return_value = Deferred()
        mockclient.send_fetch_request.return_value = Deferred()
        the_group = 'U2'
        the_topic = 'test_consumer_stop_during_commit'
        the_part = 11
        the_offset = 0
        # Create a consumer and muck with the state a bit...
        consumer = Consumer(mockclient, the_topic, the_part, Mock(), the_group,
                            auto_commit_every_ms=0)
        mockback = Mock()
        start_d = consumer.start(the_offset)
        start_d.addCallback(mockback)
        consumer._last_processed_offset = the_offset  # Fake processed msgs

        # Start a commit, don't fire the deferred, assert there's no result
        commit_d = consumer.commit()
        self.assertNoResult(commit_d)
        self.assertEqual(consumer._commit_ds[0], commit_d)

        # Stop the consumer, assert the start_d fired, and commit_d errbacks
        consumer.stop()
        mockback.assert_called_once_with('Stopped')
        self.failureResultOf(commit_d, CancelledError)
Project: ccs-twistedextensions | Author: apple
def test_disconnectWhileConnecting(self):
        """
        When the L{IConnector} is told to C{disconnect} before an in-progress
        L{Deferred} from C{connect} has fired, it will cancel that L{Deferred}.
        """
        self.connector.disconnect()
        self.assertEqual(len(self.factory.fails), 1)
        self.assertTrue(self.factory.fails[0].reason.check(CancelledError))
Project: ccs-twistedextensions | Author: apple
def test_stopConnectingWhileConnecting(self):
        """
        When the L{IConnector} is told to C{stopConnecting} while another
        attempt is still in flight, it cancels that connection.
        """
        self.connector.stopConnecting()
        self.assertEqual(len(self.factory.fails), 1)
        self.assertTrue(self.factory.fails[0].reason.check(CancelledError))
Project: voltha | Author: opencord
def _request_failure(self, value, tx_tid):
        if tx_tid in self._requests:
            (_, _, _, timeout) = self._requests.pop(tx_tid)
        else:
            # tx_msg = None
            timeout = 0

        if isinstance(value, failure.Failure):
            value.trap(CancelledError)
            self._rx_timeouts += 1
            self._consecutive_errors += 1
            self.log.info('timeout', tx_id=tx_tid, timeout=timeout)
            value = failure.Failure(TimeoutError(timeout, "Deferred"))

        return value
Project: zenchmarks | Author: squeaky-pl
def test_stopServiceBeforeStartFinished(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not yet fired.  No error will be logged
        about the cancellation of the listen attempt.
        """
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addBoth(l.append)
        self.assertEqual(l, [None])
        self.assertEqual(self.flushLoggedErrors(CancelledError), [])
Project: zenchmarks | Author: squeaky-pl
def test_stopServiceCancelStartError(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not fired yet.  An error will be logged
        if the resulting exception is not L{CancelledError}.
        """
        self.fakeServer.cancelException = ZeroDivisionError()
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEqual(l, [None])
        stoppingErrors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(stoppingErrors), 1)
Project: zenchmarks | Author: squeaky-pl
def test_startServiceWhileStopped(self):
        """
        When L{ClientService} is stopped - that is,
        L{ClientService.stopService} has been called and the L{Deferred} it
        returns has fired - calling L{startService} will cause a new connection
        to be made, and new calls to L{whenConnected} to succeed.
        """
        cq, service = self.makeReconnector(fireImmediately=False)
        stopped = service.stopService()
        self.successResultOf(stopped)
        self.failureResultOf(service.whenConnected(), CancelledError)
        service.startService()
        cq.connectQueue[-1].callback(None)
        self.assertIdentical(cq.applicationProtocols[-1],
                             self.successResultOf(service.whenConnected()))
Project: zenchmarks | Author: squeaky-pl
def test_clientConnectionLostWhileStopping(self):
        """
        When a client connection is lost while the service is stopping, the
        protocol stopping deferred is called and the reference to the protocol
        is removed.
        """
        clock = Clock()
        cq, service = self.makeReconnector(clock=clock)
        d = service.stopService()
        cq.constructedProtocols[0].connectionLost(Failure(IndentationError()))
        self.failureResultOf(service.whenConnected(), CancelledError)
        self.assertTrue(d.called)
Project: zenchmarks | Author: squeaky-pl
def _executeCommand(self, connection, protocolFactory):
        """
        Given a secured SSH connection, try to execute a command in a new
        channel created on it and associate the result with a protocol from the
        given factory.

        @param connection: See L{SSHCommandClientEndpoint.existingConnection}'s
            C{connection} parameter.

        @param protocolFactory: See L{SSHCommandClientEndpoint.connect}'s
            C{protocolFactory} parameter.

        @return: See L{SSHCommandClientEndpoint.connect}'s return value.
        """
        commandConnected = Deferred()
        def disconnectOnFailure(passthrough):
            # Close the connection immediately in case of cancellation, since
            # that implies user wants it gone immediately (e.g. a timeout):
            immediate = passthrough.check(CancelledError)
            self._creator.cleanupConnection(connection, immediate)
            return passthrough
        commandConnected.addErrback(disconnectOnFailure)

        channel = _CommandChannel(
            self._creator, self._command, protocolFactory, commandConnected)
        connection.openChannel(channel)
        return commandConnected
Project: zenchmarks | Author: squeaky-pl
def test_connectionCancelledBeforeSecure(self):
        """
        If the connection is cancelled before the SSH transport layer has
        finished key exchange (ie, gotten to the point where we may attempt to
        authenticate), the L{Deferred} returned by
        L{SSHCommandClientEndpoint.connect} fires with a L{Failure} wrapping
        L{CancelledError} and the connection is aborted.
        """
        endpoint = SSHCommandClientEndpoint.newConnection(
            self.reactor, b"/bin/ls -l", b"dummy user",
            self.hostname, self.port, knownHosts=self.knownHosts,
            ui=FixedResponseUI(False))

        factory = Factory()
        factory.protocol = Protocol
        d = endpoint.connect(factory)

        transport = AbortableFakeTransport(None, isServer=False)
        factory = self.reactor.tcpClients[0][2]
        client = factory.buildProtocol(None)
        client.makeConnection(transport)
        d.cancel()

        self.failureResultOf(d).trap(CancelledError)
        self.assertTrue(transport.aborted)
        # Make sure the connection closing doesn't result in unexpected
        # behavior when due to cancellation:
        client.connectionLost(Failure(ConnectionDone()))