Python email.errors module: CharsetError() example source code

We extracted the following 38 code examples from open-source Python projects to illustrate how to use email.errors.CharsetError().
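
Before the project examples, here is a minimal sketch (assuming Python 3 and the public email.charset.Charset API) of the most common way CharsetError surfaces in practice: passing a charset name that is not pure ASCII to Charset raises it, exactly as the test cases below demonstrate.

from email.charset import Charset
from email import errors

try:
    # A charset name containing a non-ASCII character is rejected.
    Charset('asc\xffii')
except errors.CharsetError as exc:
    print('invalid charset name:', exc)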

Project: kinect-2-libras    Author: inessadl    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
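
For reference, a small hedged sketch (Python 3 assumed) of what the ALIASES / CHARSETS / CODEC_MAP lookups performed by the constructor above produce for a common charset; the exact encodings come from the stdlib tables.

from email.charset import Charset

latin1 = Charset('latin-1')                           # 'latin-1' is an alias for iso-8859-1
print(latin1.input_charset)                           # canonical name after the ALIASES lookup
print(latin1.header_encoding, latin1.body_encoding)   # header/body encodings taken from CHARSETS
print(latin1.output_charset)                          # conversion charset (falls back to input_charset)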
Project: Flask_Blog    Author: sugarguo    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: Flask_Blog    Author: sugarguo    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: hostapd-mana    Author: adde88    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: hostapd-mana    Author: adde88    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: Intranet-Penetration    Author: yuxiaokui    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: Intranet-Penetration    Author: yuxiaokui    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: MKFQ    Author: maojingios    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: MKFQ    Author: maojingios    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: zippy    Author: securesystemslab    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, str):
                input_charset.encode('ascii')
            else:
                input_charset = str(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: zippy    Author: securesystemslab    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: oil    Author: oilshell    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: oil    Author: oilshell    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: python2-tracer    Author: extremecoders-re    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: python2-tracer    Author: extremecoders-re    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: sslstrip-hsts-openwrt    Author: adde88    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: sslstrip-hsts-openwrt    Author: adde88    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: web_ctp    Author: molebot    | Project source | File source
def test_unknown_charset(self):
        self._test('=?foobar?q?foo=ACbar?=',
                   b'foo\xacbar'.decode('ascii', 'surrogateescape'),
                   charset = 'foobar',
                   # XXX Should this be a new Defect instead?
                   defects = [errors.CharsetError])
Project: web_ctp    Author: molebot    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: xxNet    Author: drzorm    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: xxNet    Author: drzorm    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: pefile.pypy    Author: cloudtracer    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: pefile.pypy    Author: cloudtracer    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: ouroboros    Author: pybee    | Project source | File source
def test_unknown_charset(self):
        self._test('=?foobar?q?foo=ACbar?=',
                   b'foo\xacbar'.decode('ascii', 'surrogateescape'),
                   charset = 'foobar',
                   # XXX Should this be a new Defect instead?
                   defects = [errors.CharsetError])
Project: ouroboros    Author: pybee    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: ouroboros    Author: pybee    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, str):
                input_charset.encode('ascii')
            else:
                input_charset = str(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: ouroboros    Author: pybee    | Project source | File source
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \\uFFFD.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.

    """
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
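
A hedged usage sketch of the decode() shown above (email._encoded_words is a private CPython module, so treat this as illustrative only): an encoded word naming an unknown charset decodes with surrogate escapes and records errors.CharsetError as a defect, matching the test_unknown_charset cases elsewhere on this page.

from email import _encoded_words, errors

string, charset, lang, defects = _encoded_words.decode('=?foobar?q?foo=ACbar?=')
print(charset)                                                    # 'foobar'
print(lang)                                                       # '' (language part is rarely present)
print(any(isinstance(d, errors.CharsetError) for d in defects))   # expected: True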
Project: ndk-python    Author: gittor    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: ndk-python    Author: gittor    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: empyrion-python-api    Author: huhlig    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: PortalAuth    Author: sud0nick    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: pmatic    Author: LarsMichelsen    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def test_unknown_charset(self):
        self._test('=?foobar?q?foo=ACbar?=',
                   b'foo\xacbar'.decode('ascii', 'surrogateescape'),
                   charset = 'foobar',
                   # XXX Should this be a new Defect instead?
                   defects = [errors.CharsetError])
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, str):
                input_charset.encode('ascii')
            else:
                input_charset = str(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: kbe_server    Author: xiaohaoppy    | Project source | File source
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \uFFFD.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.

    """
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
Project: Docker-XX-Net    Author: kuanghy    | Project source | File source
def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower().encode('ascii')
        # Set the input charset after filtering through the aliases and/or codecs
        if not (input_charset in ALIASES or input_charset in CHARSETS):
            try:
                input_charset = codecs.lookup(input_charset).name
            except LookupError:
                pass
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
Project: Docker-XX-Net    Author: kuanghy    | Project source | File source
def test_unicode_charset_name(self):
        charset = Charset(u'us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')



# Test multilingual MIME headers.