import unittest
import tornado.escape
from tornado.escape import (
utf8,
xhtml_escape,
xhtml_unescape,
url_escape,
url_unescape,
to_unicode,
json_decode,
json_encode,
squeeze,
recursive_unicode,
)
from tornado.util import unicode_type
from typing import List, Tuple, Union, Dict, Any  # noqa: F401
linkify_tests = [
# (input, linkify_kwargs, expected_output)
(
"hello http://world.com/!",
{},
        u'hello <a href="http://world.com/">http://world.com/</a>!',
),
(
"hello http://world.com/with?param=true&stuff=yes",
{},
        u'hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>',  # noqa: E501
),
# an opened paren followed by many chars killed Gruber's regex
(
"http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
{},
        u'<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',  # noqa: E501
),
# as did too many dots at the end
(
"http://url.com/withmany.......................................",
{},
        u'<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................',  # noqa: E501
),
(
"http://url.com/withmany((((((((((((((((((((((((((((((((((a)",
{},
        u'<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)',  # noqa: E501
),
# some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
    # plus a few extras (such as multiple parentheses).
(
"http://foo.com/blah_blah",
{},
        u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>',
),
(
"http://foo.com/blah_blah/",
{},
        u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>',
),
(
"(Something like http://foo.com/blah_blah)",
{},
        u'(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)',  # noqa: E501
),
(
"http://foo.com/blah_blah_(wikipedia)",
{},
        u'<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>',  # noqa: E501
),
(
"http://foo.com/blah_(blah)_(wikipedia)_blah",
{},
        u'<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>',  # noqa: E501
),
(
"(Something like http://foo.com/blah_blah_(wikipedia))",
{},
        u'(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)',  # noqa: E501
),
(
"http://foo.com/blah_blah.",
{},
        u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.',
),
(
"http://foo.com/blah_blah/.",
{},
        u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.',
),
(
"",
{},
        u'&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;',  # noqa: E501
),
(
"",
{},
        u'&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;',  # noqa: E501
),
(
"http://foo.com/blah_blah,",
{},
        u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,',
),
(
"http://www.example.com/wpstyle/?p=364.",
{},
        u'<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.',  # noqa: E501
),
(
"rdar://1234",
{"permitted_protocols": ["http", "rdar"]},
        u'<a href="rdar://1234">rdar://1234</a>',
),
(
"rdar:/1234",
{"permitted_protocols": ["rdar"]},
        u'<a href="rdar:/1234">rdar:/1234</a>',
),
(
"http://userid:password@example.com:8080",
{},
        u'<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>',  # noqa: E501
),
(
"http://userid@example.com",
{},
        u'<a href="http://userid@example.com">http://userid@example.com</a>',
),
(
"http://userid@example.com:8080",
{},
        u'<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>',  # noqa: E501
),
(
"http://userid:password@example.com",
{},
        u'<a href="http://userid:password@example.com">http://userid:password@example.com</a>',  # noqa: E501
),
(
"message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
{"permitted_protocols": ["http", "message"]},
        u'<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">'  # noqa: E501
        u"message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>",  # noqa: E501
),
(
u"http://\u27a1.ws/\u4a39",
{},
        u'<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>',
),
(
"http://example.com",
{},
        u'&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;',  # noqa: E501
),
(
"Just a www.example.com link.",
{},
        u'Just a <a href="http://www.example.com">www.example.com</a> link.',
),
(
"Just a www.example.com link.",
{"require_protocol": True},
u"Just a www.example.com link.",
),
(
"A http://reallylong.com/link/that/exceedsthelenglimit.html",
{"require_protocol": True, "shorten": True},
        u'A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>',  # noqa: E501
),
(
"A http://reallylongdomainnamethatwillbetoolong.com/hi!",
{"shorten": True},
        u'A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!',  # noqa: E501
),
(
"A file:///passwords.txt and http://web.com link",
{},
        u'A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link',  # noqa: E501
),
(
"A file:///passwords.txt and http://web.com link",
{"permitted_protocols": ["file"]},
        u'A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link',  # noqa: E501
),
(
"www.external-link.com",
{"extra_params": 'rel="nofollow" class="external"'},
        u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>',  # noqa: E501
),
(
"www.external-link.com and www.internal-link.com/blogs extra",
{
"extra_params": lambda href: 'class="internal"'
if href.startswith("http://www.internal-link.com")
else 'rel="nofollow" class="external"'
},
        u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'  # noqa: E501
        u' and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra',  # noqa: E501
),
(
"www.external-link.com",
{"extra_params": lambda href: ' rel="nofollow" class="external" '},
        u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>',  # noqa: E501
),
]  # type: List[Tuple[Union[str, bytes], Dict[str, Any], str]]
class EscapeTestCase(unittest.TestCase):
def test_linkify(self):
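        # Each case runs plain text through tornado.escape.linkify and expects
        # URLs to come back wrapped in <a href="..."> anchors with the rest of
        # the text HTML-escaped; the kwargs exercise options such as shorten,
        # require_protocol, permitted_protocols, and extra_params.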
for text, kwargs, html in linkify_tests:
linked = tornado.escape.linkify(text, **kwargs)
self.assertEqual(linked, html)
def test_xhtml_escape(self):
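        # xhtml_escape escapes the five XML special characters (<, >, &, ", ')
        # and accepts both byte and unicode strings; each pair is also checked
        # in reverse to confirm that xhtml_unescape round-trips it.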
tests = [
("", "<foo>"),
(u"", u"<foo>"),
(b"", b"<foo>"),
("<>&\"'", "<>&"'"),
("&", "&"),
(u"<\u00e9>", u"<\u00e9>"),
(b"<\xc3\xa9>", b"<\xc3\xa9>"),
] # type: List[Tuple[Union[str, bytes], Union[str, bytes]]]
for unescaped, escaped in tests:
self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
def test_xhtml_unescape_numeric(self):
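        # Numeric character references may be decimal (&#32;) or hexadecimal
        # (&#x20;, with a case-insensitive x); malformed references must be
        # passed through unchanged.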
tests = [
("foo bar", "foo bar"),
("foo bar", "foo bar"),
("foo bar", "foo bar"),
("foo઼bar", u"foo\u0abcbar"),
("fooyz;bar", "fooyz;bar"), # invalid encoding
("foobar", "foobar"), # invalid encoding
("foobar", "foobar"), # invalid encoding
]
for escaped, unescaped in tests:
self.assertEqual(unescaped, xhtml_unescape(escaped))
def test_url_escape_unicode(self):
tests = [
# byte strings are passed through as-is
(u"\u00e9".encode("utf8"), "%C3%A9"),
(u"\u00e9".encode("latin1"), "%E9"),
# unicode strings become utf8
(u"\u00e9", "%C3%A9"),
] # type: List[Tuple[Union[str, bytes], str]]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
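        # Each case is (escaped, expected, encoding): the percent-escapes are
        # decoded to bytes and then decoded with `encoding`; encoding=None
        # skips that step and returns the raw bytes.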
tests = [
("%C3%A9", u"\u00e9", "utf8"),
("%C3%A9", u"\u00c3\u00a9", "latin1"),
("%C3%A9", utf8(u"\u00e9"), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# characters, but make sure the function accepts both byte
# and unicode strings.
self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
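        # By default url_escape uses quote_plus semantics (space becomes "+",
        # "+" becomes %2B); plus=False switches to quote semantics (space
        # becomes %20), and url_unescape takes the same flag.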
unescaped = "+ #%"
plus_escaped = "%2B+%23%25"
escaped = "%2B%20%23%25"
self.assertEqual(url_escape(unescaped), plus_escaped)
self.assertEqual(url_escape(unescaped, plus=False), escaped)
self.assertEqual(url_unescape(plus_escaped), unescaped)
self.assertEqual(url_unescape(escaped, plus=False), unescaped)
self.assertEqual(url_unescape(plus_escaped, encoding=None), utf8(unescaped))
self.assertEqual(
url_unescape(escaped, encoding=None, plus=False), utf8(unescaped)
)
def test_escape_return_types(self):
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
self.assertEqual(json_decode(b'"foo"'), u"foo")
self.assertEqual(json_decode(u'"foo"'), u"foo")
# Non-ascii bytes are interpreted as utf8
self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
if bytes is str:
self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
def test_squeeze(self):
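        # squeeze collapses each run of whitespace into a single space.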
self.assertEqual(
squeeze(u"sequences of whitespace chars"),
u"sequences of whitespace chars",
)
def test_recursive_unicode(self):
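        # recursive_unicode walks dicts, lists, and tuples, decoding any byte
        # strings it finds as UTF-8 while preserving the container types.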
tests = {
"dict": {b"foo": b"bar"},
"list": [b"foo", b"bar"],
"tuple": (b"foo", b"bar"),
"bytes": b"foo",
}
self.assertEqual(recursive_unicode(tests["dict"]), {u"foo": u"bar"})
self.assertEqual(recursive_unicode(tests["list"]), [u"foo", u"bar"])
self.assertEqual(recursive_unicode(tests["tuple"]), (u"foo", u"bar"))
self.assertEqual(recursive_unicode(tests["bytes"]), u"foo")