我们从 Python 开源项目中，提取了以下 50 个代码示例，用于说明如何使用 tornado.escape.to_unicode()。
def test_streaming_follow_redirects(self):
    """Redirected fetch: callbacks must fire only for the final response."""
    # TODO(bdarnell): this test belongs in httpclient_test instead of
    # simple_httpclient_test, but it fails with the version of libcurl
    # available on travis-ci.  Move it when that has been upgraded
    # or we have a better framework to skip tests based on curl version.
    header_lines = []
    body_chunks = []
    self.fetch("/redirect?url=/hello",
               header_callback=header_lines.append,
               streaming_callback=body_chunks.append)
    decoded_chunks = [to_unicode(chunk) for chunk in body_chunks]
    self.assertEqual(decoded_chunks, ['Hello world!'])
    # Exactly one HTTP status line means headers were reported for the
    # final response only, not for the intermediate redirect.
    start_line_count = sum(1 for line in header_lines
                           if line.startswith("HTTP/"))
    self.assertEqual(start_line_count, 1)
def test_csv_bom(self):
    """CSV translation files prefixed with a BOM must still load."""
    source_path = os.path.join(os.path.dirname(__file__),
                               'csv_translations', 'fr_FR.csv')
    with open(source_path, 'rb') as f:
        char_data = to_unicode(f.read())
    # Re-encode our input data (which is utf-8 without BOM) in
    # encodings that use the BOM and ensure that we can still load
    # it. Note that utf-16-le and utf-16-be do not write a BOM,
    # so we only test whichever variant is native to our platform.
    for encoding in ['utf-8-sig', 'utf-16']:
        tmpdir = tempfile.mkdtemp()
        try:
            target_path = os.path.join(tmpdir, 'fr_FR.csv')
            with open(target_path, 'wb') as f:
                f.write(char_data.encode(encoding))
            tornado.locale.load_translations(tmpdir)
            locale = tornado.locale.get('fr_FR')
            self.assertIsInstance(locale, tornado.locale.CSVLocale)
            self.assertEqual(locale.translate("school"), u("\u00e9cole"))
        finally:
            # Clean up even when an assertion above fails.
            shutil.rmtree(tmpdir)
def __init__(self, template_string, name="<string>", loader=None,
             compress_whitespace=None, autoescape=_UNSET):
    """Parse and compile ``template_string`` into an executable template.

    Whitespace compression defaults to on for ``.html``/``.js`` names;
    autoescape falls back to the loader's setting, then the module default.
    """
    self.name = name
    if compress_whitespace is None:
        # Markup-like files get whitespace compression by default.
        compress_whitespace = name.endswith((".html", ".js"))
    # Autoescape precedence: explicit argument > loader > module default.
    if autoescape is not _UNSET:
        self.autoescape = autoescape
    elif loader:
        self.autoescape = loader.autoescape
    else:
        self.autoescape = _DEFAULT_AUTOESCAPE
    self.namespace = loader.namespace if loader else {}
    reader = _TemplateReader(name, escape.native_str(template_string))
    self.file = _File(_parse(reader, self))
    self.code = self._generate_python(loader, compress_whitespace)
    try:
        self.compiled = compile(escape.to_unicode(self.code),
                                "<template %s>" % self.name,
                                "exec")
    except Exception:
        # Log the generated source so template compile errors are debuggable.
        formatted_code = _format_code(self.code).rstrip()
        logging.error("%s code:\n%s", self.name, formatted_code)
        raise
def test_csv_bom(self):
    """Loading a BOM-prefixed CSV translation file should succeed."""
    csv_path = os.path.join(os.path.dirname(__file__),
                            'csv_translations', 'fr_FR.csv')
    with open(csv_path, 'rb') as f:
        char_data = to_unicode(f.read())
    # Re-encode our input data (which is utf-8 without BOM) in
    # encodings that use the BOM and ensure that we can still load
    # it. Note that utf-16-le and utf-16-be do not write a BOM,
    # so we only test whichever variant is native to our platform.
    for encoding in ['utf-8-sig', 'utf-16']:
        tmpdir = tempfile.mkdtemp()
        try:
            with open(os.path.join(tmpdir, 'fr_FR.csv'), 'wb') as f:
                f.write(char_data.encode(encoding))
            tornado.locale.load_translations(tmpdir)
            locale = tornado.locale.get('fr_FR')
            self.assertIsInstance(locale, tornado.locale.CSVLocale)
            self.assertEqual(locale.translate("school"), u"\u00e9cole")
        finally:
            # Always remove the scratch directory, pass or fail.
            shutil.rmtree(tmpdir)
def response_handler(self, msg):
    """Forward a command-execution response to the GraphQL subscriber.

    ``msg`` is a two-part message; the second part is a JSON-encoded
    payload.  If nobody is subscribed to 'commandExecute', the response
    is dropped after logging.
    """
    ident, resp_bytes = msg
    resp = json_decode(to_unicode(resp_bytes))
    app_log.debug('resp: %s', resp)
    subid = self.subscriptions.get('commandExecute')
    if subid is None:
        # No active subscription — nothing to deliver.
        return
    envelope = {
        'type': 'subscription_data',
        'id': subid,
        'payload': {
            'data': resp
        }
    }
    self.write_message(json_encode(envelope))
def test_unicode_apply(self):
    """{% apply %} with a unicode-returning function keeps non-ASCII intact."""
    def shout(s):
        # Decode first so .upper() operates on text, not bytes.
        return to_unicode(s).upper()
    template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
    rendered = template.generate(upper=shout)
    self.assertEqual(rendered, utf8(u("FOO \u00c9")))
def test_bytes_apply(self):
    """{% apply %} with a bytes-returning function keeps non-ASCII intact."""
    def shout(s):
        # Round-trip through unicode so .upper() works, then re-encode.
        return utf8(to_unicode(s).upper())
    template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
    rendered = template.generate(upper=shout)
    self.assertEqual(rendered, utf8(u("FOO \u00c9")))
def test_utf8_in_file(self):
    """A UTF-8 encoded template file renders its non-ASCII text correctly."""
    rendered = self.loader.load("utf8.html").generate()
    self.assertEqual(to_unicode(rendered).strip(), u("H\u00e9llo"))
def decode_argument(self, value, name=None):
    """Decode a single raw request argument.

    ``value`` must arrive as ``bytes`` (the exact type — subclasses are
    deliberately rejected); anything else raises ``Exception``.  When the
    request supplies an ``encoding`` argument, ``value`` is decoded with
    that charset; otherwise the raw bytes are returned unchanged.
    """
    # ``is not`` is the idiomatic exact-type check (type objects are
    # singletons); isinstance() would also admit bytes subclasses,
    # which this type-checking handler intentionally does not want.
    if type(value) is not bytes:
        raise Exception("unexpected type for value: %r" % type(value))
    # use self.request.arguments directly to avoid recursion
    if 'encoding' in self.request.arguments:
        return value.decode(to_unicode(self.request.arguments['encoding'][0]))
    else:
        return value
def test_types(self):
    """The typecheck handler reports no type errors for GET or POST."""
    cookie_value = to_unicode(create_signed_value(self.COOKIE_SECRET,
                                                  "asdf", "qwer"))
    request_headers = {"Cookie": "asdf=" + cookie_value}
    response = self.fetch("/typecheck/asdf?foo=bar", headers=request_headers)
    data = json_decode(response.body)
    # An empty dict means the handler saw the expected types everywhere.
    self.assertEqual(data, {})
    response = self.fetch("/typecheck/asdf?foo=bar",
                          method="POST",
                          headers=request_headers,
                          body="foo=bar")
def handle_read(self, data):
    """Capitalize one line from the client and write back the result.

    Replies ``error`` if the input was already fully upper-case,
    otherwise ``ok`` plus the capitalized text, then closes the stream.
    """
    logging.info("handle_read")
    text = to_unicode(data)
    if text == text.upper():
        self.stream.write(b"error\talready capitalized\n")
    else:
        # data already has \n
        reply = "ok\t%s" % text.upper()
        self.stream.write(utf8(reply))
    self.stream.close()
def process_response(self, data):
    """Parse a tab-separated "status<TAB>message" reply line.

    Returns the message on an 'ok' status; raises CapError with the
    message text for any other status.
    """
    match = re.match('(.*)\t(.*)\n', to_unicode(data))
    status, message = match.groups()
    if status != 'ok':
        raise CapError(message)
    return message
def _handle_message(self, opcode, data):
    """Dispatch one complete websocket frame by opcode.

    Opcodes follow RFC 6455 section 5.2: 0x1 text, 0x2 binary,
    0x8 close, 0x9 ping, 0xA pong.  Unknown opcodes abort the
    connection.  No-op if the client has already terminated.
    """
    if self.client_terminated:
        return
    # Per-frame decompression (permessage-deflate) before dispatch.
    if self._frame_compressed:
        data = self._decompressor.decompress(data)
    if opcode == 0x1:
        # UTF-8 data
        self._message_bytes_in += len(data)
        try:
            decoded = data.decode("utf-8")
        except UnicodeDecodeError:
            # Invalid UTF-8 in a text frame: tear down the connection
            # rather than delivering corrupt text to the handler.
            self._abort()
            return
        self._run_callback(self.handler.on_message, decoded)
    elif opcode == 0x2:
        # Binary data
        self._message_bytes_in += len(data)
        self._run_callback(self.handler.on_message, data)
    elif opcode == 0x8:
        # Close
        self.client_terminated = True
        # First two payload bytes, if present, carry a big-endian
        # unsigned close code; any remainder is a UTF-8 reason string.
        if len(data) >= 2:
            self.handler.close_code = struct.unpack('>H', data[:2])[0]
        if len(data) > 2:
            self.handler.close_reason = to_unicode(data[2:])
        # Echo the received close code, if any (RFC 6455 section 5.5.1).
        self.close(self.handler.close_code)
    elif opcode == 0x9:
        # Ping
        self._write_frame(True, 0xA, data)
    elif opcode == 0xA:
        # Pong
        self._run_callback(self.handler.on_pong, data)
    else:
        self._abort()
def test_url_unescape_unicode(self):
    """url_unescape honors the encoding argument for str and bytes input."""
    cases = [
        ('%C3%A9', u('\u00e9'), 'utf8'),
        ('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
        ('%C3%A9', utf8(u('\u00e9')), None),
    ]
    for escaped, expected, encoding in cases:
        # input strings to url_unescape should only contain ascii
        # characters, but make sure the function accepts both byte
        # and unicode strings.
        for quoted in (to_unicode(escaped), utf8(escaped)):
            self.assertEqual(url_unescape(quoted, encoding), expected)