Python requests.exceptions module: ChunkedEncodingError() example source code

The following 17 code examples, extracted from open-source Python projects, illustrate how to use requests.exceptions.ChunkedEncodingError().

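For orientation before the examples: requests raises ChunkedEncodingError when a server advertises a chunked body but the connection ends before the body is complete. A minimal retry sketch; the function name, URL handling, and retry count here are illustrative, not taken from any project below:

import requests
from requests.exceptions import ChunkedEncodingError

def fetch_with_retry(url, retries=3):
    # Re-issue the request if the chunked body is cut off mid-stream.
    for attempt in range(retries):
        try:
            response = requests.get(url, stream=True)
            return b"".join(response.iter_content(chunk_size=4096))
        except ChunkedEncodingError:
            if attempt == retries - 1:
                raise  # out of retries; let the caller see the error

Most of the examples below follow the same pattern: catch the exception, log or print it, and either skip the item or retry with fresh state.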
Project: CraigslistToCsv    Author: Pushkr
def __pagefetcher__(self, worker):
        temp_url = None
        temp_rows = []  # pre-set so the finally block cannot hit an UnboundLocalError
        with self.fetchList_lock:
            if not self.Qurl.empty():
                temp_url = self.Qurl.get()

        if temp_url is not None:
            try:
                webdata_full = requests.get(temp_url)
                soup = BeautifulSoup(webdata_full.text, "html.parser")
                result1 = soup.find("div", {"class": "content"})
                temp_rows = result1.find_all("li", {"class": "result-row"})
            except (ConnectionRefusedError, ConnectionResetError, ConnectionAbortedError) as err:
                print("\nError connecting to site: {0}\n".format(err))
            except ChunkedEncodingError:
                print("\nSite response delayed, skipping retrieval: {0}".format(sys.exc_info()[0]))
            finally:
                with self.appendList_lock:
                    # print(threading.current_thread().name, self.Qurl.qsize(), temp_url)
                    if len(temp_rows) > 0:
                        self.result_pages.append(temp_rows)
Project: CraigslistToCsv    Author: Pushkr
def __fetchPage__(self):
        # print(self.url)
        try:
            webdata = requests.get(self.url)
            soup = BeautifulSoup(webdata.text, "html.parser")
            sites = soup.find_all("div", {"class": "colmask"})
            self.ListA = sites[self.dict_map.get(self.continent)]
            self.__extractFromList__()
        except ConnectionError as err:
            print("\nError connecting to site: {0}\n".format(err))
            return
        except ChunkedEncodingError:
            print("\nSite data delayed, skipping retrieval: {0}\n".format(sys.exc_info()[0]))
        except TypeError:
            print("\nContinent not found.")
            print("Hint: use one of these - {0}".format(set(self.dict_map)))
        except Exception:  # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
            print("\nUnknown error: {0}\n".format(sys.exc_info()[0]))
        return
Project: emoji-prediction    Author: javierhonduco
def run_twitter_fetcher():
    while True:
        try:
            l.info('starting streamer...')
            streamer = TwitterEmojiStreamer(TwitterAuth.CONSUMER_KEY,
                                TwitterAuth.CONSUMER_SECRET,
                                TwitterAuth.ACCESS_TOKEN,
                                TwitterAuth.ACCESS_TOKEN_SECRET)
            streamer.statuses.filter(track=words)
        # requests.exceptions.ConnectionError
        except ChunkedEncodingError:
            l.debug('chunked_encoding_error happened')
        except ConnectionError:
            l.debug('connection_error happened')
        except Exception as e:
            l.error('exception occurred')
            l.error(e)
            raise  # bare raise preserves the original traceback
Project: emoji-prediction    Author: javierhonduco
def run_twitter_fetcher():
    sentry = Client(SENTRY_DSN)
    while True:
        try:
            l.info('starting streamer with {} emojis...'.format(len(EMOJIS)))
            sentry.captureMessage('starting `emoji-prediction`')
            streamer = TwitterEmojiStreamer(TwitterAuth.CONSUMER_KEY,
                                TwitterAuth.CONSUMER_SECRET,
                                TwitterAuth.ACCESS_TOKEN,
                                TwitterAuth.ACCESS_TOKEN_SECRET)
            streamer.statuses.filter(track=EMOJIS, language=LANGUAGE)
        # requests.exceptions.ConnectionError
        except ChunkedEncodingError:
            l.debug('chunked_encoding_error happened')
        except ConnectionError:
            l.debug('connection_error happened')
        except UnknownTwitterEmojiException as e:
            l.error('unknown exception occurred')
            l.error(e)
            sentry.captureException()
Project: pubchem-ranker    Author: jacobwindsor
def pubchem_counter(self, cid, collection):
        """
        Use the SDQAgent that PubChem uses on its compound pages to get counts for a collection.

        cid: integer. The PubChem compound identifier.
        collection: string. One of the PubChem collections, e.g. "bioactivity" or "biocollection".

        Returns: integer count, or None if the response carries no count.
        """

        uri = 'https://pubchem.ncbi.nlm.nih.gov/sdq/sdqagent.cgi?' \
              'infmt=json&outfmt=json' \
              '&query={"select":["*"],"collection":"%s",' \
              '"where":{"ors":{"cid":"%s"}},"start":1,"limit":1}' % (collection, cid)

        try:
            response = get(uri).json()
            try:
                count = response['SDQOutputSet'][0]['totalCount']
                sys.stdout.write(str(count) + "\n")
                sys.stdout.flush()
                return count
            except KeyError:
                return None

        except (exceptions.ConnectionError, TimeoutError, exceptions.Timeout,
                exceptions.ConnectTimeout, exceptions.ReadTimeout) as e:
            # Report the error and the CID it occurred on, then save what we have so far.
            # Note: write() takes a single string, so the message must be %-formatted first.
            sys.stderr.write("Error: %s. Occurred on CID: %s\n" % (e, cid))
            sys.stderr.flush()
            sys.stdout.flush()
            quit()
        except exceptions.ChunkedEncodingError as e:
            sys.stderr.write("Error: %s. Occurred on CID: %s\n" % (e, cid))
            sys.stderr.flush()
            quit()
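Because the docstring above pins down the full request, the same count can be fetched standalone. A hedged sketch with illustrative values (collection "bioactivity", CID 2244, which is aspirin; neither value comes from the project):

from requests import get

# Standalone version of pubchem_counter's SDQAgent query; the collection
# and CID are illustrative examples, not values from the project.
uri = ('https://pubchem.ncbi.nlm.nih.gov/sdq/sdqagent.cgi?'
       'infmt=json&outfmt=json'
       '&query={"select":["*"],"collection":"bioactivity",'
       '"where":{"ors":{"cid":"2244"}},"start":1,"limit":1}')
print(get(uri).json()['SDQOutputSet'][0]['totalCount'])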
Project: ProxyPool    Author: Germey
def main():
    global headers, count_proxys, proxy  # declare before first use; a late `global` is a SyntaxError in Python 3
    count = 0
    while True:
        # print('attempt', count)
        count = count + 1
        try:
            # Refresh the random User-Agent header for every attempt
            headers = {'User-Agent': ua.random}
            count_proxys = get_count_proxys()
            print('Available proxies:', count_proxys, '  current proxy:', proxy, '\n', headers)
            start_time = time.time()  # wall-clock time; time.clock() measured CPU time and was removed in Python 3.8
            html = crawl('http://www.baidu.com', proxy)
            end_time = time.time()
            print('Request took', (str(end_time - start_time))[:4], 's')
            if html.status_code == 200:
                print(html)
                return count
            elif count >= 10:
                print('Too many failures, giving up')
                break

        except (ChunkedEncodingError, ConnectionError, Timeout, UnboundLocalError, UnicodeError, ProxyError):
            proxy = get_proxy()
            print('Request failed, switching to a new proxy', '\n')
Project: PressSecBotPlus    Author: robmathers
def main():
    config = load_config()
    api = api_from_config(config)

    # Check that config file is writeable
    if not os.access(config_file, os.W_OK):
        print 'Write access to config file unavailable, exiting.'
        sys.exit(1)

    account_to_follow = config.get('settings', 'account_to_follow')

    try:
        last_tweet_id = int(config.get('saved_state', 'last_tweet_id'))
    except (NoOptionError, NoSectionError) as e:
        last_tweet_id = None

    release_backlog_tweets(api, account_to_follow, last_tweet_id)

    # Catch Twitter streaming errors, ratelimited to prevent infinite respawn
    exception_datestamps = []
    while within_exception_rate_limit(exception_datestamps):
        try:
            update_from_stream(api, account_to_follow)
        except (TwitterError, ChunkedEncodingError):
            exception_datestamps.append(datetime.now())
        except StopIteration:
            from time import sleep
            print 'Got streaming StopIteration; waiting 10 seconds to resume.'
            sleep(10)
            exception_datestamps.append(datetime.now())
Project: etc    Author: sublee
def get(self, key, recursive=False, sorted=False, quorum=False,
            wait=False, wait_index=None, timeout=None):
        """Requests to get a node by the given key."""
        url = self.make_key_url(key)
        params = self.build_args({
            'recursive': (bool, recursive or None),
            'sorted': (bool, sorted or None),
            'quorum': (bool, quorum or None),
            'wait': (bool, wait or None),
            'waitIndex': (int, wait_index),
        })
        if timeout is None:
            # Try again when :exc:`TimedOut` is thrown.
            while True:
                try:
                    try:
                        res = self.session.get(url, params=params)
                    except:
                        self.erred()
                except (TimedOut, ChunkedEncodingError):
                    continue
                else:
                    break
        else:
            try:
                res = self.session.get(url, params=params, timeout=timeout)
            except ChunkedEncodingError:
                raise TimedOut
            except:
                self.erred()
        return self.wrap_response(res)
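A long-poll (`wait=True`) keeps the HTTP response open until the node changes, so a truncated chunked stream is routine here; that is why the method above retries on ChunkedEncodingError when no timeout is given and maps it to TimedOut when one is. A hedged usage sketch, assuming `client` is an instance of the class above and the key is illustrative:

# Block until the key changes; truncated long-poll responses are retried.
result = client.get('/config/flag', wait=True)

# With a timeout, a truncated stream surfaces as TimedOut instead.
try:
    result = client.get('/config/flag', wait=True, timeout=30)
except TimedOut:
    pass  # no change within 30 seconds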
Project: openkamer    Author: openkamer
def create_parliament_members(max_results=None, all_members=False, update_votes=True):
    logger.info('BEGIN')
    parliament = Parliament.get_or_create_tweede_kamer()
    if all_members:
        member_wikidata_ids = wikidata.search_parliament_member_ids()
    else:
        member_wikidata_ids = wikidata.search_parliament_member_ids_with_start_date()
    counter = 0
    members = []
    for person_wikidata_id in member_wikidata_ids:
        logger.info('=========================')
        try:
            members += create_parliament_member_from_wikidata_id(parliament, person_wikidata_id)
        except (JSONDecodeError, ConnectionError, ConnectTimeout, ChunkedEncodingError) as error:
            logger.exception(error)
        except Exception as error:
            logger.exception(error)
            raise
        counter += 1
        if max_results and counter >= max_results:
            logger.info('END: max results reached')
            break
    if update_votes:
        set_individual_votes_derived_info()
    logger.info('END')
    return members
Project: CraigslistToCsv    Author: Pushkr
def __rowfetcher__(self, worker):
        row = None
        with self.fetchList_lock:
            if not self.Qurl.empty():
                row = self.Qurl.get()

        if row is None:
            return

        id_url = row.find("a", {"class": "hdrlnk"})
        lurl = id_url.get("href")
        posting = urlparse(lurl)
        if posting.netloc == '':
            post_url = urlparse(self.url).scheme + "://" + urlparse(self.url).netloc + posting.path
        else:
            post_url = urlparse(self.url).scheme + "://" + posting.netloc + posting.path

        title = id_url.text if id_url is not None else "No Title"

        span = row.find("span", {"class": "result-price"})
        price = span.text if span is not None else "Not Listed"

        # Retrieve post details
        try:
            post_data = requests.get(post_url)
            post_soup = BeautifulSoup(post_data.text, "html.parser")
            pspan = post_soup.find("span", {"class": "postingtitletext"})
            pbody = post_soup.find("section", {"id": "postingbody"})
        except (ConnectionRefusedError, ConnectionResetError, ConnectionAbortedError) as err:
            print("\nError connecting to site: {0}\n".format(err))
            return
        except ChunkedEncodingError:
            print("\nSite response delayed, skipping retrieval: {0}".format(sys.exc_info()[0]))
            return  # post_soup/pspan/pbody were never set, so bail out before using them

        location = "Not Listed"
        try:
            location = pspan.small.text if pspan is not None else "Not Listed"
        except AttributeError:
            pass

        body_text = pbody.text if pbody is not None else "Not Listed"

        pbody = post_soup.find_all("p", {"class": "postinginfo"})

        post_time, upd_time = ["N/A", "N/A"]
        try:
            if pbody[2].find("time", {"class": "timeago"}) is not None:
                post_time = (pbody[2].find("time", {"class": "timeago"}))['datetime'].split("T")
            if pbody[3].find("time", {"class": "timeago"}) is not None:
                upd_time = (pbody[3].find("time", {"class": "timeago"}))['datetime'].split("T")
        except (IndexError, KeyError, TypeError):
            pass
        # push retrieved details into global list
        with self.appendList_lock:
            self.items.append((title, post_url, price, location, post_time[0], post_time[1][:-5],
                               upd_time[0], upd_time[1][:-5], body_text))
Project: Callandtext    Author: iaora
def _download_url(resp, link, temp_location):
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        total_length = 0
    downloaded = 0
    show_progress = total_length > 40 * 1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.info('Downloading from URL %s' % link)

        def resp_read(chunk_size):
            try:
                # Special case for urllib3.
                try:
                    for chunk in resp.raw.stream(
                            chunk_size, decode_content=False):
                        yield chunk
                except IncompleteRead as e:
                    raise ChunkedEncodingError(e)
            except AttributeError:
                # Standard file-like object.
                while True:
                    chunk = resp.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

        for chunk in resp_read(4096):
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%%  %s' % (100 * downloaded / total_length, format_size(downloaded)))
            if download_hash is not None:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
Project: python_ddd_flask    Author: igorvinnicius
(This project's `_download_url` is pip's vendored download helper, byte-for-byte identical to the Callandtext copy shown above; the duplicate listing is omitted.)
Project: ws-cli    Author: hack4sec
def run(self):
        """ Run thread """
        req_func = getattr(self.http, self.method)
        need_retest = False

        while not self.done:
            self.last_action = int(time.time())

            if self.delay:
                time.sleep(self.delay)
            try:
                if not need_retest:
                    word = self.queue.get()
                    self.counter.up()

                url = "{0}://{1}{2}".format(self.protocol, self.domain, word)

                try:
                    resp = req_func(url)
                except ConnectionError:
                    need_retest = True
                    self.http.change_proxy()
                    continue

                positive_item = False
                if self.is_response_right(resp):
                    self.result.append(word)
                    positive_item = True

                self.log_item(word, resp, positive_item)

                self.check_positive_limit_stop(self.result)

                need_retest = False
            except Queue.Empty:
                self.done = True
                break
            except ChunkedEncodingError as e:
                self.logger.ex(e)
            except BaseException as e:
                self.logger.ex(e)
                #self.queue.task_done(word)
Project: Sudoku-Solver    Author: ayush1997
(This project's `_download_url` is pip's vendored download helper, byte-for-byte identical to the Callandtext copy shown above; the duplicate listing is omitted.)
Project: youtube-trending-music    Author: ishan-nitj
(This project's `_download_url` is pip's vendored download helper, byte-for-byte identical to the Callandtext copy shown above; the duplicate listing is omitted.)
Project: MyFriend-Rob    Author: lcheniv
(This project's `_download_url` is pip's vendored download helper, byte-for-byte identical to the Callandtext copy shown above; the duplicate listing is omitted.)
Project: twitter_word_count    Author: prrateekk
(This project's `_download_url` is pip's vendored download helper, byte-for-byte identical to the Callandtext copy shown above; the duplicate listing is omitted.)