Uploaded Test files
parent f584ad9d97 · commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions
26
venv/Lib/site-packages/tornado/__init__.py
Normal file
@@ -0,0 +1,26 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The Tornado web server and tools."""

# version is a human-readable version number.

# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "6.1"
version_info = (6, 1, 0, 0)
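Note: the comment above says ``version_info`` is a four-tuple meant for programmatic comparison. A minimal sketch of how downstream code might gate on it (the threshold and print are illustrative, not part of this commit):

import tornado

# Four-tuples compare element-wise, so this is True for 6.0.0 and later.
if tornado.version_info >= (6, 0, 0, 0):
    print("Tornado %s is asyncio-based" % tornado.version)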
Binary files added (contents not shown), including:
BIN venv/Lib/site-packages/tornado/__pycache__/auth.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/escape.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/gen.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/ioloop.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/locale.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/locks.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/log.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/queues.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/util.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/web.cpython-36.pyc (Normal file)
BIN venv/Lib/site-packages/tornado/__pycache__/wsgi.cpython-36.pyc (Normal file)
(plus other binary .pyc files in the same directory, not shown)
80
venv/Lib/site-packages/tornado/_locale_data.py
Normal file
@@ -0,0 +1,80 @@
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Data used by the tornado.locale module."""

LOCALE_NAMES = {
    "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
    "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
    "ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
    "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
    "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
    "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
    "ca_ES": {"name_en": u"Catalan", "name": u"Català"},
    "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
    "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
    "da_DK": {"name_en": u"Danish", "name": u"Dansk"},
    "de_DE": {"name_en": u"German", "name": u"Deutsch"},
    "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
    "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
    "en_US": {"name_en": u"English (US)", "name": u"English (US)"},
    "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
    "es_LA": {"name_en": u"Spanish", "name": u"Español"},
    "et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
    "eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
    "fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
    "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
    "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
    "fr_FR": {"name_en": u"French", "name": u"Français"},
    "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
    "gl_ES": {"name_en": u"Galician", "name": u"Galego"},
    "he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
    "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
    "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
    "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
    "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
    "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
    "it_IT": {"name_en": u"Italian", "name": u"Italiano"},
    "ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
    "ko_KR": {"name_en": u"Korean", "name": u"한국어"},
    "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
    "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
    "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
    "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
    "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
    "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
    "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
    "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
    "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
    "pl_PL": {"name_en": u"Polish", "name": u"Polski"},
    "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
    "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
    "ro_RO": {"name_en": u"Romanian", "name": u"Română"},
    "ru_RU": {"name_en": u"Russian", "name": u"Русский"},
    "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
    "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
    "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
    "sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
    "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
    "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
    "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
    "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
    "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
    "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
    "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
    "uk_UA": {"name_en": u"Ukrainian", "name": u"Українська"},
    "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
    "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
    "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
}
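Since ``LOCALE_NAMES`` is plain module-level data, it can be read directly; a small sketch, assuming the private module path stays stable (the ``de_DE`` key is just an example):

from tornado._locale_data import LOCALE_NAMES

entry = LOCALE_NAMES["de_DE"]
print(entry["name_en"], "/", entry["name"])  # German / Deutsch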
1187
venv/Lib/site-packages/tornado/auth.py
Normal file
File diff suppressed because it is too large
363
venv/Lib/site-packages/tornado/autoreload.py
Normal file
@@ -0,0 +1,363 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Automatically restart the server when a source file is modified.

Most applications should not access this module directly. Instead,
pass the keyword argument ``autoreload=True`` to the
`tornado.web.Application` constructor (or ``debug=True``, which
enables this setting and several others). This will enable autoreload
mode as well as checking for changes to templates and static
resources. Note that restarting is a destructive operation and any
requests in progress will be aborted when the process restarts. (If
you want to disable autoreload while using other debug-mode features,
pass both ``debug=True`` and ``autoreload=False``).

This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.

The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.

This module will not work correctly when `.HTTPServer`'s multi-process
mode is used.

Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.

"""

import os
import sys

# sys.path handling
# -----------------
#
# If a module is run with "python -m", the current directory (i.e. "")
# is automatically prepended to sys.path, but not if it is run as
# "path/to/file.py". The processing for "-m" rewrites the former to
# the latter, so subsequent executions won't have the same path as the
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by reconstructing the original command
# line (Python >= 3.4) or by setting the $PYTHONPATH environment
# variable (Python < 3.4) before re-execution so the new process will
# see the correct path. We attempt to address the latter problem when
# tornado.autoreload is run as __main__.

if __name__ == "__main__":
    # This sys.path manipulation must come before our imports (as much
    # as possible - if we introduced a tornado.sys or tornado.os
    # module we'd be in trouble), or else our imports would become
    # relative again despite the future import.
    #
    # There is a separate __main__ block at the end of the file to call main().
    if sys.path[0] == os.path.dirname(__file__):
        del sys.path[0]

import functools
import logging
import os
import pkgutil  # type: ignore
import sys
import traceback
import types
import subprocess
import weakref

from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in

try:
    import signal
except ImportError:
    signal = None  # type: ignore

import typing
from typing import Callable, Dict

if typing.TYPE_CHECKING:
    from typing import List, Optional, Union  # noqa: F401

# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != "win32"

_watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary()  # type: ignore
_autoreload_is_main = False
_original_argv = None  # type: Optional[List[str]]
_original_spec = None


def start(check_time: int = 500) -> None:
    """Begins watching source files for changes.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """
    io_loop = ioloop.IOLoop.current()
    if io_loop in _io_loops:
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once in the same process")
    modify_times = {}  # type: Dict[str, float]
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time)
    scheduler.start()


def wait() -> None:
    """Wait for a watched file to change, then restart the process.

    Intended to be used at the end of scripts like unit test runners,
    to run the tests again after any source file changes (but see also
    the command-line interface in `main`)
    """
    io_loop = ioloop.IOLoop()
    io_loop.add_callback(start)
    io_loop.start()


def watch(filename: str) -> None:
    """Add a file to the watch list.

    All imported modules are watched by default.
    """
    _watched_files.add(filename)


def add_reload_hook(fn: Callable[[], None]) -> None:
    """Add a function to be called before reloading the process.

    Note that for open file and socket handles it is generally
    preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
    `os.set_inheritable`) instead of using a reload hook to close them.
    """
    _reload_hooks.append(fn)


def _reload_on_update(modify_times: Dict[str, float]) -> None:
    if _reload_attempted:
        # We already tried to reload and it didn't work, so don't try again.
        return
    if process.task_id() is not None:
        # We're in a child process created by fork_processes. If child
        # processes restarted themselves, they'd all restart and then
        # all call fork_processes again.
        return
    for module in list(sys.modules.values()):
        # Some modules play games with sys.modules (e.g. email/__init__.py
        # in the standard library), and occasionally this can cause strange
        # failures in getattr. Just ignore anything that's not an ordinary
        # module.
        if not isinstance(module, types.ModuleType):
            continue
        path = getattr(module, "__file__", None)
        if not path:
            continue
        if path.endswith(".pyc") or path.endswith(".pyo"):
            path = path[:-1]
        _check_file(modify_times, path)
    for path in _watched_files:
        _check_file(modify_times, path)


def _check_file(modify_times: Dict[str, float], path: str) -> None:
    try:
        modified = os.stat(path).st_mtime
    except Exception:
        return
    if path not in modify_times:
        modify_times[path] = modified
        return
    if modify_times[path] != modified:
        gen_log.info("%s modified; restarting server", path)
        _reload()


def _reload() -> None:
    global _reload_attempted
    _reload_attempted = True
    for fn in _reload_hooks:
        fn()
    if hasattr(signal, "setitimer"):
        # Clear the alarm signal set by
        # ioloop.set_blocking_log_threshold so it doesn't fire
        # after the exec.
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
    # sys.path fixes: see comments at top of file. If __main__.__spec__
    # exists, we were invoked with -m and the effective path is about to
    # change on re-exec. Reconstruct the original command line to
    # ensure that the new process sees the same path we did. If
    # __spec__ is not available (Python < 3.4), check instead if
    # sys.path[0] is an empty string and add the current directory to
    # $PYTHONPATH.
    if _autoreload_is_main:
        assert _original_argv is not None
        spec = _original_spec
        argv = _original_argv
    else:
        spec = getattr(sys.modules["__main__"], "__spec__", None)
        argv = sys.argv
    if spec:
        argv = ["-m", spec.name] + argv[1:]
    else:
        path_prefix = "." + os.pathsep
        if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith(
            path_prefix
        ):
            os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "")
    if not _has_execv:
        subprocess.Popen([sys.executable] + argv)
        os._exit(0)
    else:
        try:
            os.execv(sys.executable, [sys.executable] + argv)
        except OSError:
            # Mac OS X versions prior to 10.6 do not support execv in
            # a process that contains multiple threads. Instead of
            # re-executing in the current process, start a new one
            # and cause the current process to exit. This isn't
            # ideal since the new process is detached from the parent
            # terminal and thus cannot easily be killed with ctrl-C,
            # but it's better than not being able to autoreload at
            # all.
            # Unfortunately the errno returned in this case does not
            # appear to be consistent, so we can't easily check for
            # this error specifically.
            os.spawnv(
                os.P_NOWAIT, sys.executable, [sys.executable] + argv  # type: ignore
            )
            # At this point the IOLoop has been closed and finally
            # blocks will experience errors if we allow the stack to
            # unwind, so just exit uncleanly.
            os._exit(0)


_USAGE = """\
Usage:
  python -m tornado.autoreload -m module.to.run [args...]
  python -m tornado.autoreload path/to/script.py [args...]
"""


def main() -> None:
    """Command-line wrapper to re-run a script whenever its source changes.

    Scripts may be specified by filename or module name::

        python -m tornado.autoreload -m tornado.test.runtests
        python -m tornado.autoreload tornado/test/runtests.py

    Running a script with this wrapper is similar to calling
    `tornado.autoreload.wait` at the end of the script, but this wrapper
    can catch import-time problems like syntax errors that would otherwise
    prevent the script from reaching its call to `wait`.
    """
    # Remember that we were launched with autoreload as main.
    # The main module can be tricky; set the variables both in our globals
    # (which may be __main__) and the real importable version.
    import tornado.autoreload

    global _autoreload_is_main
    global _original_argv, _original_spec
    tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
    original_argv = sys.argv
    tornado.autoreload._original_argv = _original_argv = original_argv
    original_spec = getattr(sys.modules["__main__"], "__spec__", None)
    tornado.autoreload._original_spec = _original_spec = original_spec
    sys.argv = sys.argv[:]
    if len(sys.argv) >= 3 and sys.argv[1] == "-m":
        mode = "module"
        module = sys.argv[2]
        del sys.argv[1:3]
    elif len(sys.argv) >= 2:
        mode = "script"
        script = sys.argv[1]
        sys.argv = sys.argv[1:]
    else:
        print(_USAGE, file=sys.stderr)
        sys.exit(1)

    try:
        if mode == "module":
            import runpy

            runpy.run_module(module, run_name="__main__", alter_sys=True)
        elif mode == "script":
            with open(script) as f:
                # Execute the script in our namespace instead of creating
                # a new one so that something that tries to import __main__
                # (e.g. the unittest module) will see names defined in the
                # script instead of just those defined in this module.
                global __file__
                __file__ = script
                # If __package__ is defined, imports may be incorrectly
                # interpreted as relative to this module.
                global __package__
                del __package__
                exec_in(f.read(), globals(), globals())
    except SystemExit as e:
        logging.basicConfig()
        gen_log.info("Script exited with status %s", e.code)
    except Exception as e:
        logging.basicConfig()
        gen_log.warning("Script exited with uncaught exception", exc_info=True)
        # If an exception occurred at import time, the file with the error
        # never made it into sys.modules and so we won't know to watch it.
        # Just to make sure we've covered everything, walk the stack trace
        # from the exception and watch every file.
        for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
            watch(filename)
        if isinstance(e, SyntaxError):
            # SyntaxErrors are special: their innermost stack frame is fake
            # so extract_tb won't see it and we have to get the filename
            # from the exception object.
            watch(e.filename)
    else:
        logging.basicConfig()
        gen_log.info("Script exited normally")
    # restore sys.argv so subsequent executions will include autoreload
    sys.argv = original_argv

    if mode == "module":
        # runpy did a fake import of the module as __main__, but now it's
        # no longer in sys.modules. Figure out where it is and watch it.
        loader = pkgutil.get_loader(module)
        if loader is not None:
            watch(loader.get_filename())  # type: ignore

    wait()


if __name__ == "__main__":
    # See also the other __main__ block at the top of the file, which modifies
    # sys.path before our imports
    main()
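As the module docstring notes, most applications get autoreload indirectly via ``debug=True`` on the Application rather than by calling this module. A minimal sketch of that path (the handler and port are illustrative, not part of this commit):

import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello")

# debug=True enables autoreload along with other debug-mode features.
app = tornado.web.Application([(r"/", MainHandler)], debug=True)
app.listen(8888)
tornado.ioloop.IOLoop.current().start()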
263
venv/Lib/site-packages/tornado/concurrent.py
Normal file
@@ -0,0 +1,263 @@
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with ``Future`` objects.

Tornado previously provided its own ``Future`` class, but now uses
`asyncio.Future`. This module contains utility functions for working
with `asyncio.Future` in a way that is backwards-compatible with
Tornado's old ``Future`` implementation.

While this module is an important part of Tornado's internal
implementation, applications rarely need to interact with it
directly.

"""

import asyncio
from concurrent import futures
import functools
import sys
import types

from tornado.log import app_log

import typing
from typing import Any, Callable, Optional, Tuple, Union

_T = typing.TypeVar("_T")


class ReturnValueIgnoredError(Exception):
    # No longer used; was previously used by @return_future
    pass


Future = asyncio.Future

FUTURES = (futures.Future, Future)


def is_future(x: Any) -> bool:
    return isinstance(x, FUTURES)


class DummyExecutor(futures.Executor):
    def submit(
        self, fn: Callable[..., _T], *args: Any, **kwargs: Any
    ) -> "futures.Future[_T]":
        future = futures.Future()  # type: futures.Future[_T]
        try:
            future_set_result_unless_cancelled(future, fn(*args, **kwargs))
        except Exception:
            future_set_exc_info(future, sys.exc_info())
        return future

    def shutdown(self, wait: bool = True) -> None:
        pass


dummy_executor = DummyExecutor()


def run_on_executor(*args: Any, **kwargs: Any) -> Callable:
    """Decorator to run a synchronous method asynchronously on an executor.

    Returns a future.

    The executor to be used is determined by the ``executor``
    attributes of ``self``. To use a different attribute name, pass a
    keyword argument to the decorator::

        @run_on_executor(executor='_thread_pool')
        def foo(self):
            pass

    This decorator should not be confused with the similarly-named
    `.IOLoop.run_in_executor`. In general, using ``run_in_executor``
    when *calling* a blocking method is recommended instead of using
    this decorator when *defining* a method. If compatibility with older
    versions of Tornado is required, consider defining an executor
    and using ``executor.submit()`` at the call site.

    .. versionchanged:: 4.2
       Added keyword arguments to use alternative attributes.

    .. versionchanged:: 5.0
       Always uses the current IOLoop instead of ``self.io_loop``.

    .. versionchanged:: 5.1
       Returns a `.Future` compatible with ``await`` instead of a
       `concurrent.futures.Future`.

    .. deprecated:: 5.1

       The ``callback`` argument is deprecated and will be removed in
       6.0. The decorator itself is discouraged in new code but will
       not be removed in 6.0.

    .. versionchanged:: 6.0

       The ``callback`` argument was removed.
    """
    # Fully type-checking decorators is tricky, and this one is
    # discouraged anyway so it doesn't have all the generic magic.
    def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]:
        executor = kwargs.get("executor", "executor")

        @functools.wraps(fn)
        def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future:
            async_future = Future()  # type: Future
            conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
            chain_future(conc_future, async_future)
            return async_future

        return wrapper

    if args and kwargs:
        raise ValueError("cannot combine positional and keyword args")
    if len(args) == 1:
        return run_on_executor_decorator(args[0])
    elif len(args) != 0:
        raise ValueError("expected 1 argument, got %d", len(args))
    return run_on_executor_decorator


_NO_RESULT = object()


def chain_future(a: "Future[_T]", b: "Future[_T]") -> None:
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.

    .. versionchanged:: 5.0

       Now accepts both Tornado/asyncio `Future` objects and
       `concurrent.futures.Future`.

    """

    def copy(future: "Future[_T]") -> None:
        assert future is a
        if b.done():
            return
        if hasattr(a, "exc_info") and a.exc_info() is not None:  # type: ignore
            future_set_exc_info(b, a.exc_info())  # type: ignore
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())

    if isinstance(a, Future):
        future_add_done_callback(a, copy)
    else:
        # concurrent.futures.Future
        from tornado.ioloop import IOLoop

        IOLoop.current().add_future(a, copy)


def future_set_result_unless_cancelled(
    future: "Union[futures.Future[_T], Future[_T]]", value: _T
) -> None:
    """Set the given ``value`` as the `Future`'s result, if not cancelled.

    Avoids ``asyncio.InvalidStateError`` when calling ``set_result()`` on
    a cancelled `asyncio.Future`.

    .. versionadded:: 5.0
    """
    if not future.cancelled():
        future.set_result(value)


def future_set_exception_unless_cancelled(
    future: "Union[futures.Future[_T], Future[_T]]", exc: BaseException
) -> None:
    """Set the given ``exc`` as the `Future`'s exception.

    If the Future is already canceled, logs the exception instead. If
    this logging is not desired, the caller should explicitly check
    the state of the Future and call ``Future.set_exception`` instead of
    this wrapper.

    Avoids ``asyncio.InvalidStateError`` when calling ``set_exception()`` on
    a cancelled `asyncio.Future`.

    .. versionadded:: 6.0

    """
    if not future.cancelled():
        future.set_exception(exc)
    else:
        app_log.error("Exception after Future was cancelled", exc_info=exc)


def future_set_exc_info(
    future: "Union[futures.Future[_T], Future[_T]]",
    exc_info: Tuple[
        Optional[type], Optional[BaseException], Optional[types.TracebackType]
    ],
) -> None:
    """Set the given ``exc_info`` as the `Future`'s exception.

    Understands both `asyncio.Future` and the extensions in older
    versions of Tornado to enable better tracebacks on Python 2.

    .. versionadded:: 5.0

    .. versionchanged:: 6.0

       If the future is already cancelled, this function is a no-op.
       (previously ``asyncio.InvalidStateError`` would be raised)

    """
    if exc_info[1] is None:
        raise Exception("future_set_exc_info called with no exception")
    future_set_exception_unless_cancelled(future, exc_info[1])


@typing.overload
def future_add_done_callback(
    future: "futures.Future[_T]", callback: Callable[["futures.Future[_T]"], None]
) -> None:
    pass


@typing.overload  # noqa: F811
def future_add_done_callback(
    future: "Future[_T]", callback: Callable[["Future[_T]"], None]
) -> None:
    pass


def future_add_done_callback(  # noqa: F811
    future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None]
) -> None:
    """Arrange to call ``callback`` when ``future`` is complete.

    ``callback`` is invoked with one argument, the ``future``.

    If ``future`` is already done, ``callback`` is invoked immediately.
    This may differ from the behavior of ``Future.add_done_callback``,
    which makes no such guarantee.

    .. versionadded:: 5.0
    """
    if future.done():
        callback(future)
    else:
        future.add_done_callback(callback)
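The ``run_on_executor`` docstring above can be made concrete with a short sketch: a class exposing the default ``executor`` attribute whose blocking method is offloaded to a thread pool (the sleep stands in for real blocking work; class and method names are illustrative):

from concurrent.futures import ThreadPoolExecutor
import time

import tornado.ioloop
from tornado.concurrent import run_on_executor

class Worker:
    def __init__(self):
        self.executor = ThreadPoolExecutor(max_workers=4)

    @run_on_executor
    def slow_double(self, x):
        time.sleep(1)  # blocking work; runs on the thread pool
        return x * 2

async def main():
    print(await Worker().slow_double(21))  # prints 42

tornado.ioloop.IOLoop.current().run_sync(main)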
583
venv/Lib/site-packages/tornado/curl_httpclient.py
Normal file
@@ -0,0 +1,583 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Non-blocking HTTP client implementation using pycurl."""

import collections
import functools
import logging
import pycurl
import threading
import time
from io import BytesIO

from tornado import httputil
from tornado import ioloop

from tornado.escape import utf8, native_str
from tornado.httpclient import (
    HTTPRequest,
    HTTPResponse,
    HTTPError,
    AsyncHTTPClient,
    main,
)
from tornado.log import app_log

from typing import Dict, Any, Callable, Union, Tuple, Optional
import typing

if typing.TYPE_CHECKING:
    from typing import Deque  # noqa: F401

curl_log = logging.getLogger("tornado.curl_httpclient")


class CurlAsyncHTTPClient(AsyncHTTPClient):
    def initialize(  # type: ignore
        self, max_clients: int = 10, defaults: Optional[Dict[str, Any]] = None
    ) -> None:
        super().initialize(defaults=defaults)
        # Typeshed is incomplete for CurlMulti, so just use Any for now.
        self._multi = pycurl.CurlMulti()  # type: Any
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [self._curl_create() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._requests = (
            collections.deque()
        )  # type: Deque[Tuple[HTTPRequest, Callable[[HTTPResponse], None], float]]
        self._fds = {}  # type: Dict[int, int]
        self._timeout = None  # type: Optional[object]

        # libcurl has bugs that sometimes cause it to not report all
        # relevant file descriptors and timeouts to TIMERFUNCTION/
        # SOCKETFUNCTION. Mitigate the effects of such bugs by
        # forcing a periodic scan of all active requests.
        self._force_timeout_callback = ioloop.PeriodicCallback(
            self._handle_force_timeout, 1000
        )
        self._force_timeout_callback.start()

        # Work around a bug in libcurl 7.29.0: Some fields in the curl
        # multi object are initialized lazily, and its destructor will
        # segfault if it is destroyed without having been used. Add
        # and remove a dummy handle to make sure everything is
        # initialized.
        dummy_curl_handle = pycurl.Curl()
        self._multi.add_handle(dummy_curl_handle)
        self._multi.remove_handle(dummy_curl_handle)

    def close(self) -> None:
        self._force_timeout_callback.stop()
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
        for curl in self._curls:
            curl.close()
        self._multi.close()
        super().close()

        # Set below properties to None to reduce the reference count of current
        # instance, because those properties hold some methods of current
        # instance that will cause circular reference.
        self._force_timeout_callback = None  # type: ignore
        self._multi = None

    def fetch_impl(
        self, request: HTTPRequest, callback: Callable[[HTTPResponse], None]
    ) -> None:
        self._requests.append((request, callback, self.io_loop.time()))
        self._process_queue()
        self._set_timeout(0)

    def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None:
        """Called by libcurl when it wants to change the file descriptors
        it cares about.
        """
        event_map = {
            pycurl.POLL_NONE: ioloop.IOLoop.NONE,
            pycurl.POLL_IN: ioloop.IOLoop.READ,
            pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
            pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE,
        }
        if event == pycurl.POLL_REMOVE:
            if fd in self._fds:
                self.io_loop.remove_handler(fd)
                del self._fds[fd]
        else:
            ioloop_event = event_map[event]
            # libcurl sometimes closes a socket and then opens a new
            # one using the same FD without giving us a POLL_NONE in
            # between. This is a problem with the epoll IOLoop,
            # because the kernel can tell when a socket is closed and
            # removes it from the epoll automatically, causing future
            # update_handler calls to fail. Since we can't tell when
            # this has happened, always use remove and re-add
            # instead of update.
            if fd in self._fds:
                self.io_loop.remove_handler(fd)
            self.io_loop.add_handler(fd, self._handle_events, ioloop_event)
            self._fds[fd] = ioloop_event

    def _set_timeout(self, msecs: int) -> None:
        """Called by libcurl to schedule a timeout."""
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
        self._timeout = self.io_loop.add_timeout(
            self.io_loop.time() + msecs / 1000.0, self._handle_timeout
        )

    def _handle_events(self, fd: int, events: int) -> None:
        """Called by IOLoop when there is activity on one of our
        file descriptors.
        """
        action = 0
        if events & ioloop.IOLoop.READ:
            action |= pycurl.CSELECT_IN
        if events & ioloop.IOLoop.WRITE:
            action |= pycurl.CSELECT_OUT
        while True:
            try:
                ret, num_handles = self._multi.socket_action(fd, action)
            except pycurl.error as e:
                ret = e.args[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()

    def _handle_timeout(self) -> None:
        """Called by IOLoop when the requested timeout has passed."""
        self._timeout = None
        while True:
            try:
                ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0)
            except pycurl.error as e:
                ret = e.args[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()

        # In theory, we shouldn't have to do this because curl will
        # call _set_timeout whenever the timeout changes. However,
        # sometimes after _handle_timeout we will need to reschedule
        # immediately even though nothing has changed from curl's
        # perspective. This is because when socket_action is
        # called with SOCKET_TIMEOUT, libcurl decides internally which
        # timeouts need to be processed by using a monotonic clock
        # (where available) while tornado uses python's time.time()
        # to decide when timeouts have occurred. When those clocks
        # disagree on elapsed time (as they will whenever there is an
        # NTP adjustment), tornado might call _handle_timeout before
        # libcurl is ready. After each timeout, resync the scheduled
        # timeout with libcurl's current state.
        new_timeout = self._multi.timeout()
        if new_timeout >= 0:
            self._set_timeout(new_timeout)

    def _handle_force_timeout(self) -> None:
        """Called by IOLoop periodically to ask libcurl to process any
        events it may have forgotten about.
        """
        while True:
            try:
                ret, num_handles = self._multi.socket_all()
            except pycurl.error as e:
                ret = e.args[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()

    def _finish_pending_requests(self) -> None:
        """Process any requests that were completed by the last
        call to multi.socket_action.
        """
        while True:
            num_q, ok_list, err_list = self._multi.info_read()
            for curl in ok_list:
                self._finish(curl)
            for curl, errnum, errmsg in err_list:
                self._finish(curl, errnum, errmsg)
            if num_q == 0:
                break
        self._process_queue()

    def _process_queue(self) -> None:
        while True:
            started = 0
            while self._free_list and self._requests:
                started += 1
                curl = self._free_list.pop()
                (request, callback, queue_start_time) = self._requests.popleft()
                # TODO: Don't smuggle extra data on an attribute of the Curl object.
                curl.info = {  # type: ignore
                    "headers": httputil.HTTPHeaders(),
                    "buffer": BytesIO(),
                    "request": request,
                    "callback": callback,
                    "queue_start_time": queue_start_time,
                    "curl_start_time": time.time(),
                    "curl_start_ioloop_time": self.io_loop.current().time(),
                }
                try:
                    self._curl_setup_request(
                        curl,
                        request,
                        curl.info["buffer"],  # type: ignore
                        curl.info["headers"],  # type: ignore
                    )
                except Exception as e:
                    # If there was an error in setup, pass it on
                    # to the callback. Note that allowing the
                    # error to escape here will appear to work
                    # most of the time since we are still in the
                    # caller's original stack frame, but when
                    # _process_queue() is called from
                    # _finish_pending_requests the exceptions have
                    # nowhere to go.
                    self._free_list.append(curl)
                    callback(HTTPResponse(request=request, code=599, error=e))
                else:
                    self._multi.add_handle(curl)

            if not started:
                break

    def _finish(
        self,
        curl: pycurl.Curl,
        curl_error: Optional[int] = None,
        curl_message: Optional[str] = None,
    ) -> None:
        info = curl.info  # type: ignore
        curl.info = None  # type: ignore
        self._multi.remove_handle(curl)
        self._free_list.append(curl)
        buffer = info["buffer"]
        if curl_error:
            assert curl_message is not None
            error = CurlError(curl_error, curl_message)  # type: Optional[CurlError]
            assert error is not None
            code = error.code
            effective_url = None
            buffer.close()
            buffer = None
        else:
            error = None
            code = curl.getinfo(pycurl.HTTP_CODE)
            effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
            buffer.seek(0)
        # the various curl timings are documented at
        # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
        time_info = dict(
            queue=info["curl_start_ioloop_time"] - info["queue_start_time"],
            namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
            connect=curl.getinfo(pycurl.CONNECT_TIME),
            appconnect=curl.getinfo(pycurl.APPCONNECT_TIME),
            pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
            starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
            total=curl.getinfo(pycurl.TOTAL_TIME),
            redirect=curl.getinfo(pycurl.REDIRECT_TIME),
        )
        try:
            info["callback"](
                HTTPResponse(
                    request=info["request"],
                    code=code,
                    headers=info["headers"],
                    buffer=buffer,
                    effective_url=effective_url,
                    error=error,
                    reason=info["headers"].get("X-Http-Reason", None),
                    request_time=self.io_loop.time() - info["curl_start_ioloop_time"],
                    start_time=info["curl_start_time"],
                    time_info=time_info,
                )
            )
        except Exception:
            self.handle_callback_exception(info["callback"])

    def handle_callback_exception(self, callback: Any) -> None:
        app_log.error("Exception in callback %r", callback, exc_info=True)

    def _curl_create(self) -> pycurl.Curl:
        curl = pycurl.Curl()
        if curl_log.isEnabledFor(logging.DEBUG):
            curl.setopt(pycurl.VERBOSE, 1)
            curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)
        if hasattr(
            pycurl, "PROTOCOLS"
        ):  # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12)
            curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
            curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
        return curl

    def _curl_setup_request(
        self,
        curl: pycurl.Curl,
        request: HTTPRequest,
        buffer: BytesIO,
        headers: httputil.HTTPHeaders,
    ) -> None:
        curl.setopt(pycurl.URL, native_str(request.url))

        # libcurl's magic "Expect: 100-continue" behavior causes delays
        # with servers that don't support it (which include, among others,
        # Google's OpenID endpoint). Additionally, this behavior has
        # a bug in conjunction with the curl_multi_socket_action API
        # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
        # which increases the delays. It's more trouble than it's worth,
        # so just turn off the feature (yes, setting Expect: to an empty
        # value is the official way to disable this)
        if "Expect" not in request.headers:
            request.headers["Expect"] = ""

        # libcurl adds Pragma: no-cache by default; disable that too
        if "Pragma" not in request.headers:
            request.headers["Pragma"] = ""

        curl.setopt(
            pycurl.HTTPHEADER,
            [
                "%s: %s" % (native_str(k), native_str(v))
                for k, v in request.headers.get_all()
            ],
        )

        curl.setopt(
            pycurl.HEADERFUNCTION,
            functools.partial(
                self._curl_header_callback, headers, request.header_callback
            ),
        )
        if request.streaming_callback:

            def write_function(b: Union[bytes, bytearray]) -> int:
                assert request.streaming_callback is not None
                self.io_loop.add_callback(request.streaming_callback, b)
                return len(b)

        else:
            write_function = buffer.write
        curl.setopt(pycurl.WRITEFUNCTION, write_function)
        curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
        curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
        assert request.connect_timeout is not None
        curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
        assert request.request_timeout is not None
        curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
        if request.user_agent:
            curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
        else:
            curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
        if request.network_interface:
            curl.setopt(pycurl.INTERFACE, request.network_interface)
        if request.decompress_response:
            curl.setopt(pycurl.ENCODING, "gzip,deflate")
        else:
            curl.setopt(pycurl.ENCODING, None)
        if request.proxy_host and request.proxy_port:
            curl.setopt(pycurl.PROXY, request.proxy_host)
            curl.setopt(pycurl.PROXYPORT, request.proxy_port)
            if request.proxy_username:
                assert request.proxy_password is not None
                credentials = httputil.encode_username_password(
                    request.proxy_username, request.proxy_password
                )
                curl.setopt(pycurl.PROXYUSERPWD, credentials)

            if request.proxy_auth_mode is None or request.proxy_auth_mode == "basic":
                curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC)
            elif request.proxy_auth_mode == "digest":
                curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST)
            else:
                raise ValueError(
                    "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode
                )
        else:
            try:
                curl.unsetopt(pycurl.PROXY)
            except TypeError:  # not supported, disable proxy
                curl.setopt(pycurl.PROXY, "")
            curl.unsetopt(pycurl.PROXYUSERPWD)
        if request.validate_cert:
            curl.setopt(pycurl.SSL_VERIFYPEER, 1)
            curl.setopt(pycurl.SSL_VERIFYHOST, 2)
        else:
            curl.setopt(pycurl.SSL_VERIFYPEER, 0)
            curl.setopt(pycurl.SSL_VERIFYHOST, 0)
        if request.ca_certs is not None:
            curl.setopt(pycurl.CAINFO, request.ca_certs)
        else:
            # There is no way to restore pycurl.CAINFO to its default value
            # (Using unsetopt makes it reject all certificates).
            # I don't see any way to read the default value from python so it
            # can be restored later. We'll have to just leave CAINFO untouched
            # if no ca_certs file was specified, and require that if any
            # request uses a custom ca_certs file, they all must.
            pass

        if request.allow_ipv6 is False:
            # Curl behaves reasonably when DNS resolution gives an ipv6 address
            # that we can't reach, so allow ipv6 unless the user asks to disable.
            curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
        else:
            curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)

        # Set the request method through curl's irritating interface which makes
        # up names for almost every single method
        curl_options = {
            "GET": pycurl.HTTPGET,
            "POST": pycurl.POST,
            "PUT": pycurl.UPLOAD,
            "HEAD": pycurl.NOBODY,
        }
        custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
        for o in curl_options.values():
            curl.setopt(o, False)
        if request.method in curl_options:
            curl.unsetopt(pycurl.CUSTOMREQUEST)
            curl.setopt(curl_options[request.method], True)
        elif request.allow_nonstandard_methods or request.method in custom_methods:
            curl.setopt(pycurl.CUSTOMREQUEST, request.method)
        else:
            raise KeyError("unknown method " + request.method)

        body_expected = request.method in ("POST", "PATCH", "PUT")
        body_present = request.body is not None
        if not request.allow_nonstandard_methods:
            # Some HTTP methods nearly always have bodies while others
            # almost never do. Fail in this case unless the user has
            # opted out of sanity checks with allow_nonstandard_methods.
            if (body_expected and not body_present) or (
                body_present and not body_expected
            ):
                raise ValueError(
                    "Body must %sbe None for method %s (unless "
                    "allow_nonstandard_methods is true)"
                    % ("not " if body_expected else "", request.method)
                )

        if body_expected or body_present:
            if request.method == "GET":
                # Even with `allow_nonstandard_methods` we disallow
                # GET with a body (because libcurl doesn't allow it
                # unless we use CUSTOMREQUEST). While the spec doesn't
                # forbid clients from sending a body, it arguably
                # disallows the server from doing anything with them.
                raise ValueError("Body must be None for GET request")
            request_buffer = BytesIO(utf8(request.body or ""))

            def ioctl(cmd: int) -> None:
                if cmd == curl.IOCMD_RESTARTREAD:  # type: ignore
                    request_buffer.seek(0)

            curl.setopt(pycurl.READFUNCTION, request_buffer.read)
            curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
            if request.method == "POST":
                curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or ""))
            else:
                curl.setopt(pycurl.UPLOAD, True)
                curl.setopt(pycurl.INFILESIZE, len(request.body or ""))

        if request.auth_username is not None:
            assert request.auth_password is not None
            if request.auth_mode is None or request.auth_mode == "basic":
                curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            elif request.auth_mode == "digest":
                curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
            else:
                raise ValueError("Unsupported auth_mode %s" % request.auth_mode)

            userpwd = httputil.encode_username_password(
                request.auth_username, request.auth_password
            )
            curl.setopt(pycurl.USERPWD, userpwd)
            curl_log.debug(
                "%s %s (username: %r)",
                request.method,
                request.url,
                request.auth_username,
            )
        else:
            curl.unsetopt(pycurl.USERPWD)
            curl_log.debug("%s %s", request.method, request.url)

        if request.client_cert is not None:
            curl.setopt(pycurl.SSLCERT, request.client_cert)

        if request.client_key is not None:
            curl.setopt(pycurl.SSLKEY, request.client_key)

        if request.ssl_options is not None:
            raise ValueError("ssl_options not supported in curl_httpclient")

        if threading.active_count() > 1:
            # libcurl/pycurl is not thread-safe by default. When multiple threads
            # are used, signals should be disabled. This has the side effect
            # of disabling DNS timeouts in some environments (when libcurl is
            # not linked against ares), so we don't do it when there is only one
            # thread. Applications that use many short-lived threads may need
            # to set NOSIGNAL manually in a prepare_curl_callback since
            # there may not be any other threads running at the time we call
            # threading.activeCount.
            curl.setopt(pycurl.NOSIGNAL, 1)
        if request.prepare_curl_callback is not None:
            request.prepare_curl_callback(curl)

    def _curl_header_callback(
        self,
        headers: httputil.HTTPHeaders,
        header_callback: Callable[[str], None],
        header_line_bytes: bytes,
    ) -> None:
        header_line = native_str(header_line_bytes.decode("latin1"))
        if header_callback is not None:
            self.io_loop.add_callback(header_callback, header_line)
        # header_line as returned by curl includes the end-of-line characters.
        # whitespace at the start should be preserved to allow multi-line headers
        header_line = header_line.rstrip()
        if header_line.startswith("HTTP/"):
            headers.clear()
            try:
                (__, __, reason) = httputil.parse_response_start_line(header_line)
                header_line = "X-Http-Reason: %s" % reason
            except httputil.HTTPInputError:
                return
        if not header_line:
            return
        headers.parse_line(header_line)

    def _curl_debug(self, debug_type: int, debug_msg: str) -> None:
        debug_types = ("I", "<", ">", "<", ">")
        if debug_type == 0:
            debug_msg = native_str(debug_msg)
            curl_log.debug("%s", debug_msg.strip())
        elif debug_type in (1, 2):
            debug_msg = native_str(debug_msg)
            for line in debug_msg.splitlines():
                curl_log.debug("%s %s", debug_types[debug_type], line)
        elif debug_type == 4:
            curl_log.debug("%s %r", debug_types[debug_type], debug_msg)


class CurlError(HTTPError):
    def __init__(self, errno: int, message: str) -> None:
        HTTPError.__init__(self, 599, message)
        self.errno = errno


if __name__ == "__main__":
    AsyncHTTPClient.configure(CurlAsyncHTTPClient)
    main()
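The ``__main__`` block above shows the intended wiring; a sketch of the same configuration inside an application (the URL is illustrative, and the ``pycurl`` package must be installed):

import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient

# Must run before the first AsyncHTTPClient instance is created.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")

async def main():
    response = await AsyncHTTPClient().fetch("http://example.com/")
    print(response.code, len(response.body))

tornado.ioloop.IOLoop.current().run_sync(main)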
402
venv/Lib/site-packages/tornado/escape.py
Normal file
|
@ -0,0 +1,402 @@
|
|||
#
|
||||
# Copyright 2009 Facebook
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
|
||||
|
||||
Also includes a few other miscellaneous string manipulation functions that
|
||||
have crept in over time.
|
||||
"""
|
||||
|
||||
import html.entities
|
||||
import json
|
||||
import re
|
||||
import urllib.parse
|
||||
|
||||
from tornado.util import unicode_type
|
||||
|
||||
import typing
|
||||
from typing import Union, Any, Optional, Dict, List, Callable
|
||||
|
||||
|
||||
_XHTML_ESCAPE_RE = re.compile("[&<>\"']")
|
||||
_XHTML_ESCAPE_DICT = {
|
||||
"&": "&",
|
||||
"<": "<",
|
||||
">": ">",
|
||||
'"': """,
|
||||
"'": "'",
|
||||
}
|
||||
|
||||
|
||||
def xhtml_escape(value: Union[str, bytes]) -> str:
|
||||
"""Escapes a string so it is valid within HTML or XML.
|
||||
|
||||
Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
|
||||
When used in attribute values the escaped strings must be enclosed
|
||||
in quotes.
|
||||
|
||||
.. versionchanged:: 3.2
|
||||
|
||||
Added the single quote to the list of escaped characters.
|
||||
"""
|
||||
return _XHTML_ESCAPE_RE.sub(
|
||||
lambda match: _XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value)
|
||||
)
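A brief illustrative call (input invented), showing that all five characters are replaced:

# xhtml_escape('<a href="x">\'hi\' & bye</a>')
# -> '&lt;a href=&quot;x&quot;&gt;&#39;hi&#39; &amp; bye&lt;/a&gt;'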
|
||||
|
||||
|
||||
def xhtml_unescape(value: Union[str, bytes]) -> str:
|
||||
"""Un-escapes an XML-escaped string."""
|
||||
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
|
||||
|
||||
|
||||
# The fact that json_encode wraps json.dumps is an implementation detail.
|
||||
# Please see https://github.com/tornadoweb/tornado/pull/706
|
||||
# before sending a pull request that adds **kwargs to this function.
|
||||
def json_encode(value: Any) -> str:
|
||||
"""JSON-encodes the given Python object."""
|
||||
# JSON permits but does not require forward slashes to be escaped.
|
||||
# This is useful when json data is emitted in a <script> tag
|
||||
# in HTML, as it prevents </script> tags from prematurely terminating
|
||||
# the JavaScript. Some json libraries do this escaping by default,
|
||||
# although python's standard library does not, so we do it here.
|
||||
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
|
||||
return json.dumps(value).replace("</", "<\\/")
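A small, hedged demonstration of the forward-slash escaping (the payload is invented):

payload = json_encode({"html": "</script>"})
# payload == '{"html": "<\\/script>"}', so the literal "</script>"
# never appears and cannot terminate an enclosing <script> block.
assert "</script>" not in payload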
|
||||
|
||||
|
||||
def json_decode(value: Union[str, bytes]) -> Any:
|
||||
"""Returns Python objects for the given JSON string.
|
||||
|
||||
Supports both `str` and `bytes` inputs.
|
||||
"""
|
||||
return json.loads(to_basestring(value))
|
||||
|
||||
|
||||
def squeeze(value: str) -> str:
|
||||
"""Replace all sequences of whitespace chars with a single space."""
|
||||
return re.sub(r"[\x00-\x20]+", " ", value).strip()
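For instance (a minimal sketch):

# squeeze(" a\t b\n\n c ") == "a b c"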
|
||||
|
||||
|
||||
def url_escape(value: Union[str, bytes], plus: bool = True) -> str:
|
||||
"""Returns a URL-encoded version of the given value.
|
||||
|
||||
If ``plus`` is true (the default), spaces will be represented
|
||||
as "+" instead of "%20". This is appropriate for query strings
|
||||
but not for the path component of a URL. Note that this default
|
||||
is the reverse of Python's urllib module.
|
||||
|
||||
.. versionadded:: 3.1
|
||||
The ``plus`` argument
|
||||
"""
|
||||
quote = urllib.parse.quote_plus if plus else urllib.parse.quote
|
||||
return quote(utf8(value))
|
||||
|
||||
|
||||
@typing.overload
|
||||
def url_unescape(value: Union[str, bytes], encoding: None, plus: bool = True) -> bytes:
|
||||
pass
|
||||
|
||||
|
||||
@typing.overload # noqa: F811
|
||||
def url_unescape(
|
||||
value: Union[str, bytes], encoding: str = "utf-8", plus: bool = True
|
||||
) -> str:
|
||||
pass
|
||||
|
||||
|
||||
def url_unescape( # noqa: F811
|
||||
value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
|
||||
) -> Union[str, bytes]:
|
||||
"""Decodes the given value from a URL.
|
||||
|
||||
The argument may be either a byte or unicode string.
|
||||
|
||||
If encoding is None, the result will be a byte string. Otherwise,
|
||||
the result is a unicode string in the specified encoding.
|
||||
|
||||
If ``plus`` is true (the default), plus signs will be interpreted
|
||||
as spaces (literal plus signs must be represented as "%2B"). This
|
||||
is appropriate for query strings and form-encoded values but not
|
||||
for the path component of a URL. Note that this default is the
|
||||
reverse of Python's urllib module.
|
||||
|
||||
.. versionadded:: 3.1
|
||||
The ``plus`` argument
|
||||
"""
|
||||
if encoding is None:
|
||||
if plus:
|
||||
# unquote_to_bytes doesn't have a _plus variant
|
||||
value = to_basestring(value).replace("+", " ")
|
||||
return urllib.parse.unquote_to_bytes(value)
|
||||
else:
|
||||
unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
|
||||
return unquote(to_basestring(value), encoding=encoding)
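A hedged sketch of how the ``plus`` and ``encoding`` arguments interact (inputs invented):

assert url_unescape("a+b%2Bc") == "a b+c"              # query-string mode
assert url_unescape("a+b%2Bc", plus=False) == "a+b+c"  # path mode
assert url_unescape("%E2%82%AC", encoding=None) == b"\xe2\x82\xac"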
|
||||
|
||||
|
||||
def parse_qs_bytes(
|
||||
qs: Union[str, bytes], keep_blank_values: bool = False, strict_parsing: bool = False
|
||||
) -> Dict[str, List[bytes]]:
|
||||
"""Parses a query string like urlparse.parse_qs,
|
||||
but takes bytes and returns the values as byte strings.
|
||||
|
||||
Keys still become type str (interpreted as latin1 in python3!)
|
||||
because it's too painful to keep them as byte strings in
|
||||
python3 and in practice they're nearly always ascii anyway.
|
||||
"""
|
||||
# This is gross, but python3 doesn't give us another way.
|
||||
# Latin1 is the universal donor of character encodings.
|
||||
if isinstance(qs, bytes):
|
||||
qs = qs.decode("latin1")
|
||||
result = urllib.parse.parse_qs(
|
||||
qs, keep_blank_values, strict_parsing, encoding="latin1", errors="strict"
|
||||
)
|
||||
encoded = {}
|
||||
for k, v in result.items():
|
||||
encoded[k] = [i.encode("latin1") for i in v]
|
||||
return encoded
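A minimal sketch of the contract described in the docstring, str keys and bytes values (input invented):

assert parse_qs_bytes(b"k=%FF&k=2") == {"k": [b"\xff", b"2"]}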
|
||||
|
||||
|
||||
_UTF8_TYPES = (bytes, type(None))
|
||||
|
||||
|
||||
@typing.overload
|
||||
def utf8(value: bytes) -> bytes:
|
||||
pass
|
||||
|
||||
|
||||
@typing.overload # noqa: F811
|
||||
def utf8(value: str) -> bytes:
|
||||
pass
|
||||
|
||||
|
||||
@typing.overload # noqa: F811
|
||||
def utf8(value: None) -> None:
|
||||
pass
|
||||
|
||||
|
||||
def utf8(value: Union[None, str, bytes]) -> Optional[bytes]: # noqa: F811
|
||||
"""Converts a string argument to a byte string.
|
||||
|
||||
If the argument is already a byte string or None, it is returned unchanged.
|
||||
Otherwise it must be a unicode string and is encoded as utf8.
|
||||
"""
|
||||
if isinstance(value, _UTF8_TYPES):
|
||||
return value
|
||||
if not isinstance(value, unicode_type):
|
||||
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
|
||||
return value.encode("utf-8")
|
||||
|
||||
|
||||
_TO_UNICODE_TYPES = (unicode_type, type(None))
|
||||
|
||||
|
||||
@typing.overload
|
||||
def to_unicode(value: str) -> str:
|
||||
pass
|
||||
|
||||
|
||||
@typing.overload # noqa: F811
|
||||
def to_unicode(value: bytes) -> str:
|
||||
pass
|
||||
|
||||
|
||||
@typing.overload # noqa: F811
|
||||
def to_unicode(value: None) -> None:
|
||||
pass
|
||||
|
||||
|
||||
def to_unicode(value: Union[None, str, bytes]) -> Optional[str]: # noqa: F811
|
||||
"""Converts a string argument to a unicode string.
|
||||
|
||||
If the argument is already a unicode string or None, it is returned
|
||||
unchanged. Otherwise it must be a byte string and is decoded as utf8.
|
||||
"""
|
||||
if isinstance(value, _TO_UNICODE_TYPES):
|
||||
return value
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
|
||||
return value.decode("utf-8")
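A round-trip sketch for the paired helpers (values invented):

assert utf8("héllo") == b"h\xc3\xa9llo"
assert to_unicode(b"h\xc3\xa9llo") == "héllo"
assert utf8(None) is None and to_unicode(None) is None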
|
||||
|
||||
|
||||
# to_unicode was previously named _unicode not because it was private,
|
||||
# but to avoid conflicts with the built-in unicode() function/type
|
||||
_unicode = to_unicode
|
||||
|
||||
# When dealing with the standard library across python 2 and 3 it is
|
||||
# sometimes useful to have a direct conversion to the native string type
|
||||
native_str = to_unicode
|
||||
to_basestring = to_unicode
|
||||
|
||||
|
||||
def recursive_unicode(obj: Any) -> Any:
|
||||
"""Walks a simple data structure, converting byte strings to unicode.
|
||||
|
||||
Supports lists, tuples, and dictionaries.
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
return dict(
|
||||
(recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()
|
||||
)
|
||||
elif isinstance(obj, list):
|
||||
return list(recursive_unicode(i) for i in obj)
|
||||
elif isinstance(obj, tuple):
|
||||
return tuple(recursive_unicode(i) for i in obj)
|
||||
elif isinstance(obj, bytes):
|
||||
return to_unicode(obj)
|
||||
else:
|
||||
return obj
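An illustrative call (structure invented) covering the supported containers:

data = {b"key": [b"v1", ("v2", b"v3")], "n": 1}
assert recursive_unicode(data) == {"key": ["v1", ("v2", "v3")], "n": 1}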
|
||||
|
||||
|
||||
# I originally used the regex from
|
||||
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
|
||||
# but it gets all exponential on certain patterns (such as too many trailing
|
||||
# dots), causing the regex matcher to never return.
|
||||
# This regex should avoid those problems.
|
||||
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
|
||||
# processed as escapes.
|
||||
_URL_RE = re.compile(
|
||||
to_unicode(
|
||||
r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""" # noqa: E501
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def linkify(
|
||||
text: Union[str, bytes],
|
||||
shorten: bool = False,
|
||||
extra_params: Union[str, Callable[[str], str]] = "",
|
||||
require_protocol: bool = False,
|
||||
permitted_protocols: List[str] = ["http", "https"],
|
||||
) -> str:
|
||||
"""Converts plain text into HTML with links.
|
||||
|
||||
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
|
||||
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
|
||||
|
||||
Parameters:
|
||||
|
||||
* ``shorten``: Long urls will be shortened for display.
|
||||
|
||||
* ``extra_params``: Extra text to include in the link tag, or a callable
|
||||
taking the link as an argument and returning the extra text
|
||||
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
|
||||
or::
|
||||
|
||||
def extra_params_cb(url):
|
||||
if url.startswith("http://example.com"):
|
||||
return 'class="internal"'
|
||||
else:
|
||||
return 'class="external" rel="nofollow"'
|
||||
linkify(text, extra_params=extra_params_cb)
|
||||
|
||||
* ``require_protocol``: Only linkify urls which include a protocol. If
|
||||
this is False, urls such as www.facebook.com will also be linkified.
|
||||
|
||||
* ``permitted_protocols``: List (or set) of protocols which should be
|
||||
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
|
||||
"mailto"])``. It is very unsafe to include protocols such as
|
||||
``javascript``.
|
||||
"""
|
||||
if extra_params and not callable(extra_params):
|
||||
extra_params = " " + extra_params.strip()
|
||||
|
||||
def make_link(m: typing.Match) -> str:
|
||||
url = m.group(1)
|
||||
proto = m.group(2)
|
||||
if require_protocol and not proto:
|
||||
return url  # no protocol, no linkify
|
||||
|
||||
if proto and proto not in permitted_protocols:
|
||||
return url # bad protocol, no linkify
|
||||
|
||||
href = m.group(1)
|
||||
if not proto:
|
||||
href = "http://" + href # no proto specified, use http
|
||||
|
||||
if callable(extra_params):
|
||||
params = " " + extra_params(href).strip()
|
||||
else:
|
||||
params = extra_params
|
||||
|
||||
# clip long urls. max_len is just an approximation
|
||||
max_len = 30
|
||||
if shorten and len(url) > max_len:
|
||||
before_clip = url
|
||||
if proto:
|
||||
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
|
||||
else:
|
||||
proto_len = 0
|
||||
|
||||
parts = url[proto_len:].split("/")
|
||||
if len(parts) > 1:
|
||||
# Grab the whole host part plus the first bit of the path
|
||||
# The path is usually not that interesting once shortened
|
||||
# (no more slug, etc), so it really just provides a little
|
||||
# extra indication of shortening.
|
||||
url = (
|
||||
url[:proto_len]
|
||||
+ parts[0]
|
||||
+ "/"
|
||||
+ parts[1][:8].split("?")[0].split(".")[0]
|
||||
)
|
||||
|
||||
if len(url) > max_len * 1.5: # still too long
|
||||
url = url[:max_len]
|
||||
|
||||
if url != before_clip:
|
||||
amp = url.rfind("&")
|
||||
# avoid splitting html char entities
|
||||
if amp > max_len - 5:
|
||||
url = url[:amp]
|
||||
url += "..."
|
||||
|
||||
if len(url) >= len(before_clip):
|
||||
url = before_clip
|
||||
else:
|
||||
# full url is visible on mouse-over (for those who don't
|
||||
# have a status bar, such as Safari by default)
|
||||
params += ' title="%s"' % href
|
||||
|
||||
return u'<a href="%s"%s>%s</a>' % (href, params, url)
|
||||
|
||||
# First HTML-escape so that our strings are all safe.
|
||||
# The regex is modified to avoid character entities other than &amp; so
|
||||
# that we won't pick up ", etc.
|
||||
text = _unicode(xhtml_escape(text))
|
||||
return _URL_RE.sub(make_link, text)
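One hedged example call, matching the docstring's description (input invented):

# linkify("docs at http://tornadoweb.org", extra_params='rel="nofollow"')
# -> 'docs at <a href="http://tornadoweb.org" rel="nofollow">http://tornadoweb.org</a>'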
|
||||
|
||||
|
||||
def _convert_entity(m: typing.Match) -> str:
|
||||
if m.group(1) == "#":
|
||||
try:
|
||||
if m.group(2)[:1].lower() == "x":
|
||||
return chr(int(m.group(2)[1:], 16))
|
||||
else:
|
||||
return chr(int(m.group(2)))
|
||||
except ValueError:
|
||||
return "&#%s;" % m.group(2)
|
||||
try:
|
||||
return _HTML_UNICODE_MAP[m.group(2)]
|
||||
except KeyError:
|
||||
return "&%s;" % m.group(2)
|
||||
|
||||
|
||||
def _build_unicode_map() -> Dict[str, str]:
|
||||
unicode_map = {}
|
||||
for name, value in html.entities.name2codepoint.items():
|
||||
unicode_map[name] = chr(value)
|
||||
return unicode_map
|
||||
|
||||
|
||||
_HTML_UNICODE_MAP = _build_unicode_map()
|
872
venv/Lib/site-packages/tornado/gen.py
Normal file
|
@@ -0,0 +1,872 @@
|
|||
"""``tornado.gen`` implements generator-based coroutines.
|
||||
|
||||
.. note::
|
||||
|
||||
The "decorator and generator" approach in this module is a
|
||||
precursor to native coroutines (using ``async def`` and ``await``)
|
||||
which were introduced in Python 3.5. Applications that do not
|
||||
require compatibility with older versions of Python should use
|
||||
native coroutines instead. Some parts of this module are still
|
||||
useful with native coroutines, notably `multi`, `sleep`,
|
||||
`WaitIterator`, and `with_timeout`. Some of these functions have
|
||||
counterparts in the `asyncio` module which may be used as well,
|
||||
although the two may not necessarily be 100% compatible.
|
||||
|
||||
Coroutines provide an easier way to work in an asynchronous
|
||||
environment than chaining callbacks. Code using coroutines is
|
||||
technically asynchronous, but it is written as a single generator
|
||||
instead of a collection of separate functions.
|
||||
|
||||
For example, here's a coroutine-based handler:
|
||||
|
||||
.. testcode::
|
||||
|
||||
class GenAsyncHandler(RequestHandler):
|
||||
@gen.coroutine
|
||||
def get(self):
|
||||
http_client = AsyncHTTPClient()
|
||||
response = yield http_client.fetch("http://example.com")
|
||||
do_something_with_response(response)
|
||||
self.render("template.html")
|
||||
|
||||
.. testoutput::
|
||||
:hide:
|
||||
|
||||
Asynchronous functions in Tornado return an ``Awaitable`` or `.Future`;
|
||||
yielding this object returns its result.
|
||||
|
||||
You can also yield a list or dict of other yieldable objects, which
|
||||
will be started at the same time and run in parallel; a list or dict
|
||||
of results will be returned when they are all finished:
|
||||
|
||||
.. testcode::
|
||||
|
||||
@gen.coroutine
|
||||
def get(self):
|
||||
http_client = AsyncHTTPClient()
|
||||
response1, response2 = yield [http_client.fetch(url1),
|
||||
http_client.fetch(url2)]
|
||||
response_dict = yield dict(response3=http_client.fetch(url3),
|
||||
response4=http_client.fetch(url4))
|
||||
response3 = response_dict['response3']
|
||||
response4 = response_dict['response4']
|
||||
|
||||
.. testoutput::
|
||||
:hide:
|
||||
|
||||
If ``tornado.platform.twisted`` is imported, it is also possible to
|
||||
yield Twisted's ``Deferred`` objects. See the `convert_yielded`
|
||||
function to extend this mechanism.
|
||||
|
||||
.. versionchanged:: 3.2
|
||||
Dict support added.
|
||||
|
||||
.. versionchanged:: 4.1
|
||||
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
|
||||
via ``singledispatch``.
|
||||
|
||||
"""
|
||||
import asyncio
|
||||
import builtins
|
||||
import collections
|
||||
from collections.abc import Generator
|
||||
import concurrent.futures
|
||||
import datetime
|
||||
import functools
|
||||
from functools import singledispatch
|
||||
from inspect import isawaitable
|
||||
import sys
|
||||
import types
|
||||
|
||||
from tornado.concurrent import (
|
||||
Future,
|
||||
is_future,
|
||||
chain_future,
|
||||
future_set_exc_info,
|
||||
future_add_done_callback,
|
||||
future_set_result_unless_cancelled,
|
||||
)
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.log import app_log
|
||||
from tornado.util import TimeoutError
|
||||
|
||||
try:
|
||||
import contextvars
|
||||
except ImportError:
|
||||
contextvars = None # type: ignore
|
||||
|
||||
import typing
|
||||
from typing import Union, Any, Callable, List, Type, Tuple, Awaitable, Dict, overload
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Sequence, Deque, Optional, Set, Iterable # noqa: F401
|
||||
|
||||
_T = typing.TypeVar("_T")
|
||||
|
||||
_Yieldable = Union[
|
||||
None, Awaitable, List[Awaitable], Dict[Any, Awaitable], concurrent.futures.Future
|
||||
]
|
||||
|
||||
|
||||
class KeyReuseError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class UnknownKeyError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class LeakedCallbackError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class BadYieldError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ReturnValueIgnoredError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _value_from_stopiteration(e: Union[StopIteration, "Return"]) -> Any:
|
||||
try:
|
||||
# StopIteration has a value attribute beginning in py33.
|
||||
# So does our Return class.
|
||||
return e.value
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
# Cython backports coroutine functionality by putting the value in
|
||||
# e.args[0].
|
||||
return e.args[0]
|
||||
except (AttributeError, IndexError):
|
||||
return None
|
||||
|
||||
|
||||
def _create_future() -> Future:
|
||||
future = Future() # type: Future
|
||||
# Fixup asyncio debug info by removing extraneous stack entries
|
||||
source_traceback = getattr(future, "_source_traceback", ())
|
||||
while source_traceback:
|
||||
# Each traceback entry is equivalent to a
|
||||
# (filename, self.lineno, self.name, self.line) tuple
|
||||
filename = source_traceback[-1][0]
|
||||
if filename == __file__:
|
||||
del source_traceback[-1]
|
||||
else:
|
||||
break
|
||||
return future
|
||||
|
||||
|
||||
def _fake_ctx_run(f: Callable[..., _T], *args: Any, **kw: Any) -> _T:
|
||||
return f(*args, **kw)
|
||||
|
||||
|
||||
@overload
|
||||
def coroutine(
|
||||
func: Callable[..., "Generator[Any, Any, _T]"]
|
||||
) -> Callable[..., "Future[_T]"]:
|
||||
...
|
||||
|
||||
|
||||
@overload
|
||||
def coroutine(func: Callable[..., _T]) -> Callable[..., "Future[_T]"]:
|
||||
...
|
||||
|
||||
|
||||
def coroutine(
|
||||
func: Union[Callable[..., "Generator[Any, Any, _T]"], Callable[..., _T]]
|
||||
) -> Callable[..., "Future[_T]"]:
|
||||
"""Decorator for asynchronous generators.
|
||||
|
||||
For compatibility with older versions of Python, coroutines may
|
||||
also "return" by raising the special exception `Return(value)
|
||||
<Return>`.
|
||||
|
||||
Functions with this decorator return a `.Future`.
|
||||
|
||||
.. warning::
|
||||
|
||||
When exceptions occur inside a coroutine, the exception
|
||||
information will be stored in the `.Future` object. You must
|
||||
examine the result of the `.Future` object, or the exception
|
||||
may go unnoticed by your code. This means yielding the function
|
||||
if called from another coroutine, using something like
|
||||
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
|
||||
to `.IOLoop.add_future`.
|
||||
|
||||
.. versionchanged:: 6.0
|
||||
|
||||
The ``callback`` argument was removed. Use the returned
|
||||
awaitable object instead.
|
||||
|
||||
"""
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
# type: (*Any, **Any) -> Future[_T]
|
||||
# This function is type-annotated with a comment to work around
|
||||
# https://bitbucket.org/pypy/pypy/issues/2868/segfault-with-args-type-annotation-in
|
||||
future = _create_future()
|
||||
if contextvars is not None:
|
||||
ctx_run = contextvars.copy_context().run # type: Callable
|
||||
else:
|
||||
ctx_run = _fake_ctx_run
|
||||
try:
|
||||
result = ctx_run(func, *args, **kwargs)
|
||||
except (Return, StopIteration) as e:
|
||||
result = _value_from_stopiteration(e)
|
||||
except Exception:
|
||||
future_set_exc_info(future, sys.exc_info())
|
||||
try:
|
||||
return future
|
||||
finally:
|
||||
# Avoid circular references
|
||||
future = None # type: ignore
|
||||
else:
|
||||
if isinstance(result, Generator):
|
||||
# Inline the first iteration of Runner.run. This lets us
|
||||
# avoid the cost of creating a Runner when the coroutine
|
||||
# never actually yields, which in turn allows us to
|
||||
# use "optional" coroutines in critical path code without
|
||||
# performance penalty for the synchronous case.
|
||||
try:
|
||||
yielded = ctx_run(next, result)
|
||||
except (StopIteration, Return) as e:
|
||||
future_set_result_unless_cancelled(
|
||||
future, _value_from_stopiteration(e)
|
||||
)
|
||||
except Exception:
|
||||
future_set_exc_info(future, sys.exc_info())
|
||||
else:
|
||||
# Provide strong references to Runner objects as long
|
||||
# as their result future objects also have strong
|
||||
# references (typically from the parent coroutine's
|
||||
# Runner). This keeps the coroutine's Runner alive.
|
||||
# We do this by exploiting the public API
|
||||
# add_done_callback() instead of putting a private
|
||||
# attribute on the Future.
|
||||
# (GitHub issues #1769, #2229).
|
||||
runner = Runner(ctx_run, result, future, yielded)
|
||||
future.add_done_callback(lambda _: runner)
|
||||
yielded = None
|
||||
try:
|
||||
return future
|
||||
finally:
|
||||
# Subtle memory optimization: if next() raised an exception,
|
||||
# the future's exc_info contains a traceback which
|
||||
# includes this stack frame. This creates a cycle,
|
||||
# which will be collected at the next full GC but has
|
||||
# been shown to greatly increase memory usage of
|
||||
# benchmarks (relative to the refcount-based scheme
|
||||
# used in the absence of cycles). We can avoid the
|
||||
# cycle by clearing the local variable after we return it.
|
||||
future = None # type: ignore
|
||||
future_set_result_unless_cancelled(future, result)
|
||||
return future
|
||||
|
||||
wrapper.__wrapped__ = func # type: ignore
|
||||
wrapper.__tornado_coroutine__ = True # type: ignore
|
||||
return wrapper
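A hedged sketch of the decorator in use (the function and values are invented); in Python 3 a plain ``return`` sets the Future's result:

@coroutine
def add_later(a, b):
    yield sleep(0.01)   # any yieldable pauses the coroutine here
    return a + b

# IOLoop.current().run_sync(lambda: add_later(1, 2)) == 3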
|
||||
|
||||
|
||||
def is_coroutine_function(func: Any) -> bool:
|
||||
"""Return whether *func* is a coroutine function, i.e. a function
|
||||
wrapped with `~.gen.coroutine`.
|
||||
|
||||
.. versionadded:: 4.5
|
||||
"""
|
||||
return getattr(func, "__tornado_coroutine__", False)
|
||||
|
||||
|
||||
class Return(Exception):
|
||||
"""Special exception to return a value from a `coroutine`.
|
||||
|
||||
If this exception is raised, its value argument is used as the
|
||||
result of the coroutine::
|
||||
|
||||
@gen.coroutine
|
||||
def fetch_json(url):
|
||||
response = yield AsyncHTTPClient().fetch(url)
|
||||
raise gen.Return(json_decode(response.body))
|
||||
|
||||
In Python 3.3, this exception is no longer necessary: the ``return``
|
||||
statement can be used directly to return a value (previously
|
||||
``yield`` and ``return`` with a value could not be combined in the
|
||||
same function).
|
||||
|
||||
By analogy with the return statement, the value argument is optional,
|
||||
but it is never necessary to ``raise gen.Return()``. The ``return``
|
||||
statement can be used with no arguments instead.
|
||||
"""
|
||||
|
||||
def __init__(self, value: Any = None) -> None:
|
||||
super().__init__()
|
||||
self.value = value
|
||||
# Cython recognizes subclasses of StopIteration with a .args tuple.
|
||||
self.args = (value,)
|
||||
|
||||
|
||||
class WaitIterator(object):
|
||||
"""Provides an iterator to yield the results of awaitables as they finish.
|
||||
|
||||
Yielding a set of awaitables like this:
|
||||
|
||||
``results = yield [awaitable1, awaitable2]``
|
||||
|
||||
pauses the coroutine until both ``awaitable1`` and ``awaitable2``
|
||||
return, and then restarts the coroutine with the results of both
|
||||
awaitables. If either awaitable raises an exception, the
|
||||
expression will raise that exception and all the results will be
|
||||
lost.
|
||||
|
||||
If you need to get the result of each awaitable as soon as possible,
|
||||
or if you need the result of some awaitables even if others produce
|
||||
errors, you can use ``WaitIterator``::
|
||||
|
||||
wait_iterator = gen.WaitIterator(awaitable1, awaitable2)
|
||||
while not wait_iterator.done():
|
||||
try:
|
||||
result = yield wait_iterator.next()
|
||||
except Exception as e:
|
||||
print("Error {} from {}".format(e, wait_iterator.current_future))
|
||||
else:
|
||||
print("Result {} received from {} at {}".format(
|
||||
result, wait_iterator.current_future,
|
||||
wait_iterator.current_index))
|
||||
|
||||
Because results are returned as soon as they are available the
|
||||
output from the iterator *will not be in the same order as the
|
||||
input arguments*. If you need to know which future produced the
|
||||
current result, you can use the attributes
|
||||
``WaitIterator.current_future`` or ``WaitIterator.current_index``
|
||||
to get the index of the awaitable from the input list (if keyword
|
||||
arguments were used in the construction of the `WaitIterator`,
|
||||
``current_index`` will use the corresponding keyword).
|
||||
|
||||
On Python 3.5, `WaitIterator` implements the async iterator
|
||||
protocol, so it can be used with the ``async for`` statement (note
|
||||
that in this version the entire iteration is aborted if any value
|
||||
raises an exception, while the previous example can continue past
|
||||
individual errors)::
|
||||
|
||||
wait_iterator = gen.WaitIterator(future1, future2)
async for result in wait_iterator:
|
||||
print("Result {} received from {} at {}".format(
|
||||
result, wait_iterator.current_future,
|
||||
wait_iterator.current_index))
|
||||
|
||||
.. versionadded:: 4.1
|
||||
|
||||
.. versionchanged:: 4.3
|
||||
Added ``async for`` support in Python 3.5.
|
||||
|
||||
"""
|
||||
|
||||
_unfinished = {} # type: Dict[Future, Union[int, str]]
|
||||
|
||||
def __init__(self, *args: Future, **kwargs: Future) -> None:
|
||||
if args and kwargs:
|
||||
raise ValueError("You must provide args or kwargs, not both")
|
||||
|
||||
if kwargs:
|
||||
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
|
||||
futures = list(kwargs.values()) # type: Sequence[Future]
|
||||
else:
|
||||
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
|
||||
futures = args
|
||||
|
||||
self._finished = collections.deque() # type: Deque[Future]
|
||||
self.current_index = None # type: Optional[Union[str, int]]
|
||||
self.current_future = None # type: Optional[Future]
|
||||
self._running_future = None # type: Optional[Future]
|
||||
|
||||
for future in futures:
|
||||
future_add_done_callback(future, self._done_callback)
|
||||
|
||||
def done(self) -> bool:
|
||||
"""Returns True if this iterator has no more results."""
|
||||
if self._finished or self._unfinished:
|
||||
return False
|
||||
# Clear the 'current' values when iteration is done.
|
||||
self.current_index = self.current_future = None
|
||||
return True
|
||||
|
||||
def next(self) -> Future:
|
||||
"""Returns a `.Future` that will yield the next available result.
|
||||
|
||||
Note that this `.Future` will not be the same object as any of
|
||||
the inputs.
|
||||
"""
|
||||
self._running_future = Future()
|
||||
|
||||
if self._finished:
|
||||
self._return_result(self._finished.popleft())
|
||||
|
||||
return self._running_future
|
||||
|
||||
def _done_callback(self, done: Future) -> None:
|
||||
if self._running_future and not self._running_future.done():
|
||||
self._return_result(done)
|
||||
else:
|
||||
self._finished.append(done)
|
||||
|
||||
def _return_result(self, done: Future) -> None:
|
||||
"""Called set the returned future's state that of the future
|
||||
we yielded, and set the current future for the iterator.
|
||||
"""
|
||||
if self._running_future is None:
|
||||
raise Exception("no future is running")
|
||||
chain_future(done, self._running_future)
|
||||
|
||||
self.current_future = done
|
||||
self.current_index = self._unfinished.pop(done)
|
||||
|
||||
def __aiter__(self) -> typing.AsyncIterator:
|
||||
return self
|
||||
|
||||
def __anext__(self) -> Future:
|
||||
if self.done():
|
||||
# Lookup by name to silence pyflakes on older versions.
|
||||
raise getattr(builtins, "StopAsyncIteration")()
|
||||
return self.next()
|
||||
|
||||
|
||||
def multi(
|
||||
children: Union[List[_Yieldable], Dict[Any, _Yieldable]],
|
||||
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
|
||||
) -> "Union[Future[List], Future[Dict]]":
|
||||
"""Runs multiple asynchronous operations in parallel.
|
||||
|
||||
``children`` may either be a list or a dict whose values are
|
||||
yieldable objects. ``multi()`` returns a new yieldable
|
||||
object that resolves to a parallel structure containing their
|
||||
results. If ``children`` is a list, the result is a list of
|
||||
results in the same order; if it is a dict, the result is a dict
|
||||
with the same keys.
|
||||
|
||||
That is, ``results = yield multi(list_of_futures)`` is equivalent
|
||||
to::
|
||||
|
||||
results = []
|
||||
for future in list_of_futures:
|
||||
results.append(yield future)
|
||||
|
||||
If any children raise exceptions, ``multi()`` will raise the first
|
||||
one. All others will be logged, unless they are of types
|
||||
contained in the ``quiet_exceptions`` argument.
|
||||
|
||||
In a ``yield``-based coroutine, it is not normally necessary to
|
||||
call this function directly, since the coroutine runner will
|
||||
do it automatically when a list or dict is yielded. However,
|
||||
it is necessary in ``await``-based coroutines, or to pass
|
||||
the ``quiet_exceptions`` argument.
|
||||
|
||||
This function is available under the names ``multi()`` and ``Multi()``
|
||||
for historical reasons.
|
||||
|
||||
Cancelling a `.Future` returned by ``multi()`` does not cancel its
|
||||
children. `asyncio.gather` is similar to ``multi()``, but it does
|
||||
cancel its children.
|
||||
|
||||
.. versionchanged:: 4.2
|
||||
If multiple yieldables fail, any exceptions after the first
|
||||
(which is raised) will be logged. Added the ``quiet_exceptions``
|
||||
argument to suppress this logging for selected exception types.
|
||||
|
||||
.. versionchanged:: 4.3
|
||||
Replaced the class ``Multi`` and the function ``multi_future``
|
||||
with a unified function ``multi``. Added support for yieldables
|
||||
other than ``YieldPoint`` and `.Future`.
|
||||
|
||||
"""
|
||||
return multi_future(children, quiet_exceptions=quiet_exceptions)
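A short, hedged sketch of ``multi`` with a dict in a native coroutine (names invented):

async def pause_both():
    # Both sleeps run concurrently; the result mirrors the input keys.
    results = await multi({"a": sleep(0.01), "b": sleep(0.02)})
    assert results == {"a": None, "b": None}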
|
||||
|
||||
|
||||
Multi = multi
|
||||
|
||||
|
||||
def multi_future(
|
||||
children: Union[List[_Yieldable], Dict[Any, _Yieldable]],
|
||||
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
|
||||
) -> "Union[Future[List], Future[Dict]]":
|
||||
"""Wait for multiple asynchronous futures in parallel.
|
||||
|
||||
Since Tornado 6.0, this function is exactly the same as `multi`.
|
||||
|
||||
.. versionadded:: 4.0
|
||||
|
||||
.. versionchanged:: 4.2
|
||||
If multiple ``Futures`` fail, any exceptions after the first (which is
|
||||
raised) will be logged. Added the ``quiet_exceptions``
|
||||
argument to suppress this logging for selected exception types.
|
||||
|
||||
.. deprecated:: 4.3
|
||||
Use `multi` instead.
|
||||
"""
|
||||
if isinstance(children, dict):
|
||||
keys = list(children.keys()) # type: Optional[List]
|
||||
children_seq = children.values() # type: Iterable
|
||||
else:
|
||||
keys = None
|
||||
children_seq = children
|
||||
children_futs = list(map(convert_yielded, children_seq))
|
||||
assert all(is_future(i) or isinstance(i, _NullFuture) for i in children_futs)
|
||||
unfinished_children = set(children_futs)
|
||||
|
||||
future = _create_future()
|
||||
if not children_futs:
|
||||
future_set_result_unless_cancelled(future, {} if keys is not None else [])
|
||||
|
||||
def callback(fut: Future) -> None:
|
||||
unfinished_children.remove(fut)
|
||||
if not unfinished_children:
|
||||
result_list = []
|
||||
for f in children_futs:
|
||||
try:
|
||||
result_list.append(f.result())
|
||||
except Exception as e:
|
||||
if future.done():
|
||||
if not isinstance(e, quiet_exceptions):
|
||||
app_log.error(
|
||||
"Multiple exceptions in yield list", exc_info=True
|
||||
)
|
||||
else:
|
||||
future_set_exc_info(future, sys.exc_info())
|
||||
if not future.done():
|
||||
if keys is not None:
|
||||
future_set_result_unless_cancelled(
|
||||
future, dict(zip(keys, result_list))
|
||||
)
|
||||
else:
|
||||
future_set_result_unless_cancelled(future, result_list)
|
||||
|
||||
listening = set() # type: Set[Future]
|
||||
for f in children_futs:
|
||||
if f not in listening:
|
||||
listening.add(f)
|
||||
future_add_done_callback(f, callback)
|
||||
return future
|
||||
|
||||
|
||||
def maybe_future(x: Any) -> Future:
|
||||
"""Converts ``x`` into a `.Future`.
|
||||
|
||||
If ``x`` is already a `.Future`, it is simply returned; otherwise
|
||||
it is wrapped in a new `.Future`. This is suitable for use as
|
||||
``result = yield gen.maybe_future(f())`` when you don't know whether
|
||||
``f()`` returns a `.Future` or not.
|
||||
|
||||
.. deprecated:: 4.3
|
||||
This function only handles ``Futures``, not other yieldable objects.
|
||||
Instead of `maybe_future`, check for the non-future result types
|
||||
you expect (often just ``None``), and ``yield`` anything unknown.
|
||||
"""
|
||||
if is_future(x):
|
||||
return x
|
||||
else:
|
||||
fut = _create_future()
|
||||
fut.set_result(x)
|
||||
return fut
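For example (a minimal sketch):

# maybe_future(42).result() == 42; an existing Future is returned as-is.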
|
||||
|
||||
|
||||
def with_timeout(
|
||||
timeout: Union[float, datetime.timedelta],
|
||||
future: _Yieldable,
|
||||
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
|
||||
) -> Future:
|
||||
"""Wraps a `.Future` (or other yieldable object) in a timeout.
|
||||
|
||||
Raises `tornado.util.TimeoutError` if the input future does not
|
||||
complete before ``timeout``, which may be specified in any form
|
||||
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
|
||||
an absolute time relative to `.IOLoop.time`)
|
||||
|
||||
If the wrapped `.Future` fails after it has timed out, the exception
|
||||
will be logged unless it is either of a type contained in
|
||||
``quiet_exceptions`` (which may be an exception type or a sequence of
|
||||
types), or an ``asyncio.CancelledError``.
|
||||
|
||||
The wrapped `.Future` is not canceled when the timeout expires,
|
||||
permitting it to be reused. `asyncio.wait_for` is similar to this
|
||||
function but it does cancel the wrapped `.Future` on timeout.
|
||||
|
||||
.. versionadded:: 4.0
|
||||
|
||||
.. versionchanged:: 4.1
|
||||
Added the ``quiet_exceptions`` argument and the logging of unhandled
|
||||
exceptions.
|
||||
|
||||
.. versionchanged:: 4.4
|
||||
Added support for yieldable objects other than `.Future`.
|
||||
|
||||
.. versionchanged:: 6.0.3
|
||||
``asyncio.CancelledError`` is now always considered "quiet".
|
||||
|
||||
"""
|
||||
# It's tempting to optimize this by cancelling the input future on timeout
|
||||
# instead of creating a new one, but A) we can't know if we are the only
|
||||
# one waiting on the input future, so cancelling it might disrupt other
|
||||
# callers and B) concurrent futures can only be cancelled while they are
|
||||
# in the queue, so cancellation cannot reliably bound our waiting time.
|
||||
future_converted = convert_yielded(future)
|
||||
result = _create_future()
|
||||
chain_future(future_converted, result)
|
||||
io_loop = IOLoop.current()
|
||||
|
||||
def error_callback(future: Future) -> None:
|
||||
try:
|
||||
future.result()
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
except Exception as e:
|
||||
if not isinstance(e, quiet_exceptions):
|
||||
app_log.error(
|
||||
"Exception in Future %r after timeout", future, exc_info=True
|
||||
)
|
||||
|
||||
def timeout_callback() -> None:
|
||||
if not result.done():
|
||||
result.set_exception(TimeoutError("Timeout"))
|
||||
# In case the wrapped future goes on to fail, log it.
|
||||
future_add_done_callback(future_converted, error_callback)
|
||||
|
||||
timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
|
||||
if isinstance(future_converted, Future):
|
||||
# We know this future will resolve on the IOLoop, so we don't
|
||||
# need the extra thread-safety of IOLoop.add_future (and we also
|
||||
# don't care about StackContext here).
|
||||
future_add_done_callback(
|
||||
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
|
||||
)
|
||||
else:
|
||||
# concurrent.futures.Futures may resolve on any thread, so we
|
||||
# need to route them back to the IOLoop.
|
||||
io_loop.add_future(
|
||||
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
|
||||
)
|
||||
return result
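A hedged usage sketch (names invented); note that, per the docstring, the wrapped future keeps running after the timeout:

async def first_second_or_none(fut):
    try:
        return await with_timeout(datetime.timedelta(seconds=1), fut)
    except TimeoutError:
        return None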
|
||||
|
||||
|
||||
def sleep(duration: float) -> "Future[None]":
|
||||
"""Return a `.Future` that resolves after the given number of seconds.
|
||||
|
||||
When used with ``yield`` in a coroutine, this is a non-blocking
|
||||
analogue to `time.sleep` (which should not be used in coroutines
|
||||
because it is blocking)::
|
||||
|
||||
yield gen.sleep(0.5)
|
||||
|
||||
Note that calling this function on its own does nothing; you must
|
||||
wait on the `.Future` it returns (usually by yielding it).
|
||||
|
||||
.. versionadded:: 4.1
|
||||
"""
|
||||
f = _create_future()
|
||||
IOLoop.current().call_later(
|
||||
duration, lambda: future_set_result_unless_cancelled(f, None)
|
||||
)
|
||||
return f
|
||||
|
||||
|
||||
class _NullFuture(object):
|
||||
"""_NullFuture resembles a Future that finished with a result of None.
|
||||
|
||||
It's not actually a `Future` to avoid depending on a particular event loop.
|
||||
Handled as a special case in the coroutine runner.
|
||||
|
||||
We lie and tell the type checker that a _NullFuture is a Future so
|
||||
we don't have to leak _NullFuture into lots of public APIs. But
|
||||
this means that the type checker can't warn us when we're passing
|
||||
a _NullFuture into a code path that doesn't understand what to do
|
||||
with it.
|
||||
"""
|
||||
|
||||
def result(self) -> None:
|
||||
return None
|
||||
|
||||
def done(self) -> bool:
|
||||
return True
|
||||
|
||||
|
||||
# _null_future is used as a dummy value in the coroutine runner. It differs
|
||||
# from moment in that moment always adds a delay of one IOLoop iteration
|
||||
# while _null_future is processed as soon as possible.
|
||||
_null_future = typing.cast(Future, _NullFuture())
|
||||
|
||||
moment = typing.cast(Future, _NullFuture())
|
||||
moment.__doc__ = """A special object which may be yielded to allow the IOLoop to run for
|
||||
one iteration.
|
||||
|
||||
This is not needed in normal use but it can be helpful in long-running
|
||||
coroutines that are likely to yield Futures that are ready instantly.
|
||||
|
||||
Usage: ``yield gen.moment``
|
||||
|
||||
In native coroutines, the equivalent of ``yield gen.moment`` is
|
||||
``await asyncio.sleep(0)``.
|
||||
|
||||
.. versionadded:: 4.0
|
||||
|
||||
.. deprecated:: 4.5
|
||||
``yield None`` (or ``yield`` with no argument) is now equivalent to
|
||||
``yield gen.moment``.
|
||||
"""
|
||||
|
||||
|
||||
class Runner(object):
|
||||
"""Internal implementation of `tornado.gen.coroutine`.
|
||||
|
||||
Maintains information about pending callbacks and their results.
|
||||
|
||||
The results of the generator are stored in ``result_future`` (a
|
||||
`.Future`)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
ctx_run: Callable,
|
||||
gen: "Generator[_Yieldable, Any, _T]",
|
||||
result_future: "Future[_T]",
|
||||
first_yielded: _Yieldable,
|
||||
) -> None:
|
||||
self.ctx_run = ctx_run
|
||||
self.gen = gen
|
||||
self.result_future = result_future
|
||||
self.future = _null_future # type: Union[None, Future]
|
||||
self.running = False
|
||||
self.finished = False
|
||||
self.io_loop = IOLoop.current()
|
||||
if self.handle_yield(first_yielded):
|
||||
gen = result_future = first_yielded = None # type: ignore
|
||||
self.ctx_run(self.run)
|
||||
|
||||
def run(self) -> None:
|
||||
"""Starts or resumes the generator, running until it reaches a
|
||||
yield point that is not ready.
|
||||
"""
|
||||
if self.running or self.finished:
|
||||
return
|
||||
try:
|
||||
self.running = True
|
||||
while True:
|
||||
future = self.future
|
||||
if future is None:
|
||||
raise Exception("No pending future")
|
||||
if not future.done():
|
||||
return
|
||||
self.future = None
|
||||
try:
|
||||
exc_info = None
|
||||
|
||||
try:
|
||||
value = future.result()
|
||||
except Exception:
|
||||
exc_info = sys.exc_info()
|
||||
future = None
|
||||
|
||||
if exc_info is not None:
|
||||
try:
|
||||
yielded = self.gen.throw(*exc_info) # type: ignore
|
||||
finally:
|
||||
# Break up a reference to itself
|
||||
# for faster GC on CPython.
|
||||
exc_info = None
|
||||
else:
|
||||
yielded = self.gen.send(value)
|
||||
|
||||
except (StopIteration, Return) as e:
|
||||
self.finished = True
|
||||
self.future = _null_future
|
||||
future_set_result_unless_cancelled(
|
||||
self.result_future, _value_from_stopiteration(e)
|
||||
)
|
||||
self.result_future = None # type: ignore
|
||||
return
|
||||
except Exception:
|
||||
self.finished = True
|
||||
self.future = _null_future
|
||||
future_set_exc_info(self.result_future, sys.exc_info())
|
||||
self.result_future = None # type: ignore
|
||||
return
|
||||
if not self.handle_yield(yielded):
|
||||
return
|
||||
yielded = None
|
||||
finally:
|
||||
self.running = False
|
||||
|
||||
def handle_yield(self, yielded: _Yieldable) -> bool:
|
||||
try:
|
||||
self.future = convert_yielded(yielded)
|
||||
except BadYieldError:
|
||||
self.future = Future()
|
||||
future_set_exc_info(self.future, sys.exc_info())
|
||||
|
||||
if self.future is moment:
|
||||
self.io_loop.add_callback(self.ctx_run, self.run)
|
||||
return False
|
||||
elif self.future is None:
|
||||
raise Exception("no pending future")
|
||||
elif not self.future.done():
|
||||
|
||||
def inner(f: Any) -> None:
|
||||
# Break a reference cycle to speed GC.
|
||||
f = None # noqa: F841
|
||||
self.ctx_run(self.run)
|
||||
|
||||
self.io_loop.add_future(self.future, inner)
|
||||
return False
|
||||
return True
|
||||
|
||||
def handle_exception(
|
||||
self, typ: Type[Exception], value: Exception, tb: types.TracebackType
|
||||
) -> bool:
|
||||
if not self.running and not self.finished:
|
||||
self.future = Future()
|
||||
future_set_exc_info(self.future, (typ, value, tb))
|
||||
self.ctx_run(self.run)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
# Convert Awaitables into Futures.
|
||||
try:
|
||||
_wrap_awaitable = asyncio.ensure_future
|
||||
except AttributeError:
|
||||
# asyncio.ensure_future was introduced in Python 3.4.4, but
|
||||
# Debian jessie still ships with 3.4.2 so try the old name.
|
||||
_wrap_awaitable = getattr(asyncio, "async")
|
||||
|
||||
|
||||
def convert_yielded(yielded: _Yieldable) -> Future:
|
||||
"""Convert a yielded object into a `.Future`.
|
||||
|
||||
The default implementation accepts lists, dictionaries, and
|
||||
Futures. This has the side effect of starting any coroutines that
|
||||
did not start themselves, similar to `asyncio.ensure_future`.
|
||||
|
||||
If the `~functools.singledispatch` library is available, this function
|
||||
may be extended to support additional types. For example::
|
||||
|
||||
@convert_yielded.register(asyncio.Future)
|
||||
def _(asyncio_future):
|
||||
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
|
||||
|
||||
.. versionadded:: 4.1
|
||||
|
||||
"""
|
||||
if yielded is None or yielded is moment:
|
||||
return moment
|
||||
elif yielded is _null_future:
|
||||
return _null_future
|
||||
elif isinstance(yielded, (list, dict)):
|
||||
return multi(yielded) # type: ignore
|
||||
elif is_future(yielded):
|
||||
return typing.cast(Future, yielded)
|
||||
elif isawaitable(yielded):
|
||||
return _wrap_awaitable(yielded) # type: ignore
|
||||
else:
|
||||
raise BadYieldError("yielded unknown object %r" % (yielded,))
|
||||
|
||||
|
||||
convert_yielded = singledispatch(convert_yielded)
|
842
venv/Lib/site-packages/tornado/http1connection.py
Normal file
|
@@ -0,0 +1,842 @@
|
|||
#
|
||||
# Copyright 2014 Facebook
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Client and server implementations of HTTP/1.x.
|
||||
|
||||
.. versionadded:: 4.0
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import re
|
||||
import types
|
||||
|
||||
from tornado.concurrent import (
|
||||
Future,
|
||||
future_add_done_callback,
|
||||
future_set_result_unless_cancelled,
|
||||
)
|
||||
from tornado.escape import native_str, utf8
|
||||
from tornado import gen
|
||||
from tornado import httputil
|
||||
from tornado import iostream
|
||||
from tornado.log import gen_log, app_log
|
||||
from tornado.util import GzipDecompressor
|
||||
|
||||
|
||||
from typing import cast, Optional, Type, Awaitable, Callable, Union, Tuple
|
||||
|
||||
|
||||
class _QuietException(Exception):
|
||||
def __init__(self) -> None:
|
||||
pass
|
||||
|
||||
|
||||
class _ExceptionLoggingContext(object):
|
||||
"""Used with the ``with`` statement when calling delegate methods to
|
||||
log any exceptions with the given logger. Any exceptions caught are
|
||||
converted to _QuietException.
|
||||
"""
|
||||
|
||||
def __init__(self, logger: logging.Logger) -> None:
|
||||
self.logger = logger
|
||||
|
||||
def __enter__(self) -> None:
|
||||
pass
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
typ: "Optional[Type[BaseException]]",
|
||||
value: Optional[BaseException],
|
||||
tb: types.TracebackType,
|
||||
) -> None:
|
||||
if value is not None:
|
||||
assert typ is not None
|
||||
self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
|
||||
raise _QuietException
|
||||
|
||||
|
||||
class HTTP1ConnectionParameters(object):
|
||||
"""Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
no_keep_alive: bool = False,
|
||||
chunk_size: Optional[int] = None,
|
||||
max_header_size: Optional[int] = None,
|
||||
header_timeout: Optional[float] = None,
|
||||
max_body_size: Optional[int] = None,
|
||||
body_timeout: Optional[float] = None,
|
||||
decompress: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
:arg bool no_keep_alive: If true, always close the connection after
|
||||
one request.
|
||||
:arg int chunk_size: how much data to read into memory at once
|
||||
:arg int max_header_size: maximum amount of data for HTTP headers
|
||||
:arg float header_timeout: how long to wait for all headers (seconds)
|
||||
:arg int max_body_size: maximum amount of data for body
|
||||
:arg float body_timeout: how long to wait while reading body (seconds)
|
||||
:arg bool decompress: if true, decode incoming
|
||||
``Content-Encoding: gzip``
|
||||
"""
|
||||
self.no_keep_alive = no_keep_alive
|
||||
self.chunk_size = chunk_size or 65536
|
||||
self.max_header_size = max_header_size or 65536
|
||||
self.header_timeout = header_timeout
|
||||
self.max_body_size = max_body_size
|
||||
self.body_timeout = body_timeout
|
||||
self.decompress = decompress
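A hedged construction sketch (the limits shown are invented for illustration, not recommendations):

params = HTTP1ConnectionParameters(
    chunk_size=16 * 1024,   # read 16 KiB of body at a time
    max_body_size=1 << 20,  # reject request bodies over 1 MiB
    decompress=True,        # transparently decode Content-Encoding: gzip
)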
|
||||
|
||||
|
||||
class HTTP1Connection(httputil.HTTPConnection):
|
||||
"""Implements the HTTP/1.x protocol.
|
||||
|
||||
This class can be used on its own for clients, or via `HTTP1ServerConnection`
|
||||
for servers.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
stream: iostream.IOStream,
|
||||
is_client: bool,
|
||||
params: Optional[HTTP1ConnectionParameters] = None,
|
||||
context: Optional[object] = None,
|
||||
) -> None:
|
||||
"""
|
||||
:arg stream: an `.IOStream`
|
||||
:arg bool is_client: client or server
|
||||
:arg params: a `.HTTP1ConnectionParameters` instance or ``None``
|
||||
:arg context: an opaque application-defined object that can be accessed
|
||||
as ``connection.context``.
|
||||
"""
|
||||
self.is_client = is_client
|
||||
self.stream = stream
|
||||
if params is None:
|
||||
params = HTTP1ConnectionParameters()
|
||||
self.params = params
|
||||
self.context = context
|
||||
self.no_keep_alive = params.no_keep_alive
|
||||
# The body limits can be altered by the delegate, so save them
|
||||
# here instead of just referencing self.params later.
|
||||
self._max_body_size = self.params.max_body_size or self.stream.max_buffer_size
|
||||
self._body_timeout = self.params.body_timeout
|
||||
# _write_finished is set to True when finish() has been called,
|
||||
# i.e. there will be no more data sent. Data may still be in the
|
||||
# stream's write buffer.
|
||||
self._write_finished = False
|
||||
# True when we have read the entire incoming body.
|
||||
self._read_finished = False
|
||||
# _finish_future resolves when all data has been written and flushed
|
||||
# to the IOStream.
|
||||
self._finish_future = Future() # type: Future[None]
|
||||
# If true, the connection should be closed after this request
|
||||
# (after the response has been written in the server side,
|
||||
# and after it has been read in the client)
|
||||
self._disconnect_on_finish = False
|
||||
self._clear_callbacks()
|
||||
# Save the start lines after we read or write them; they
|
||||
# affect later processing (e.g. 304 responses and HEAD methods
|
||||
# have content-length but no bodies)
|
||||
self._request_start_line = None # type: Optional[httputil.RequestStartLine]
|
||||
self._response_start_line = None # type: Optional[httputil.ResponseStartLine]
|
||||
self._request_headers = None # type: Optional[httputil.HTTPHeaders]
|
||||
# True if we are writing output with chunked encoding.
|
||||
self._chunking_output = False
|
||||
# While reading a body with a content-length, this is the
|
||||
# amount left to read.
|
||||
self._expected_content_remaining = None # type: Optional[int]
|
||||
# A Future for our outgoing writes, returned by IOStream.write.
|
||||
self._pending_write = None # type: Optional[Future[None]]
|
||||
|
||||
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
|
||||
"""Read a single HTTP response.
|
||||
|
||||
Typical client-mode usage is to write a request using `write_headers`,
|
||||
`write`, and `finish`, and then call ``read_response``.
|
||||
|
||||
:arg delegate: a `.HTTPMessageDelegate`
|
||||
|
||||
Returns a `.Future` that resolves to a bool after the full response has
|
||||
been read. The result is true if the stream is still open.
|
||||
"""
|
||||
if self.params.decompress:
|
||||
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
|
||||
return self._read_message(delegate)
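A hedged outline of the client-mode sequence the docstring describes (``MyDelegate`` is an invented ``HTTPMessageDelegate`` subclass):

# conn = HTTP1Connection(stream, is_client=True)
# conn.write_headers(start_line, headers)   # send the request
# conn.finish()                             # no more request body
# reusable = await conn.read_response(MyDelegate())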
|
||||
|
||||
async def _read_message(self, delegate: httputil.HTTPMessageDelegate) -> bool:
|
||||
need_delegate_close = False
|
||||
try:
|
||||
header_future = self.stream.read_until_regex(
|
||||
b"\r?\n\r?\n", max_bytes=self.params.max_header_size
|
||||
)
|
||||
if self.params.header_timeout is None:
|
||||
header_data = await header_future
|
||||
else:
|
||||
try:
|
||||
header_data = await gen.with_timeout(
|
||||
self.stream.io_loop.time() + self.params.header_timeout,
|
||||
header_future,
|
||||
quiet_exceptions=iostream.StreamClosedError,
|
||||
)
|
||||
except gen.TimeoutError:
|
||||
self.close()
|
||||
return False
|
||||
start_line_str, headers = self._parse_headers(header_data)
|
||||
if self.is_client:
|
||||
resp_start_line = httputil.parse_response_start_line(start_line_str)
|
||||
self._response_start_line = resp_start_line
|
||||
start_line = (
|
||||
resp_start_line
|
||||
) # type: Union[httputil.RequestStartLine, httputil.ResponseStartLine]
|
||||
# TODO: this will need to change to support client-side keepalive
|
||||
self._disconnect_on_finish = False
|
||||
else:
|
||||
req_start_line = httputil.parse_request_start_line(start_line_str)
|
||||
self._request_start_line = req_start_line
|
||||
self._request_headers = headers
|
||||
start_line = req_start_line
|
||||
self._disconnect_on_finish = not self._can_keep_alive(
|
||||
req_start_line, headers
|
||||
)
|
||||
need_delegate_close = True
|
||||
with _ExceptionLoggingContext(app_log):
|
||||
header_recv_future = delegate.headers_received(start_line, headers)
|
||||
if header_recv_future is not None:
|
||||
await header_recv_future
|
||||
if self.stream is None:
|
||||
# We've been detached.
|
||||
need_delegate_close = False
|
||||
return False
|
||||
skip_body = False
|
||||
if self.is_client:
|
||||
assert isinstance(start_line, httputil.ResponseStartLine)
|
||||
if (
|
||||
self._request_start_line is not None
|
||||
and self._request_start_line.method == "HEAD"
|
||||
):
|
||||
skip_body = True
|
||||
code = start_line.code
|
||||
if code == 304:
|
||||
# 304 responses may include the content-length header
|
||||
# but do not actually have a body.
|
||||
# http://tools.ietf.org/html/rfc7230#section-3.3
|
||||
skip_body = True
|
||||
if 100 <= code < 200:
|
||||
# 1xx responses should never indicate the presence of
|
||||
# a body.
|
||||
if "Content-Length" in headers or "Transfer-Encoding" in headers:
|
||||
raise httputil.HTTPInputError(
|
||||
"Response code %d cannot have body" % code
|
||||
)
|
||||
# TODO: client delegates will get headers_received twice
|
||||
# in the case of a 100-continue. Document or change?
|
||||
await self._read_message(delegate)
|
||||
else:
|
||||
if headers.get("Expect") == "100-continue" and not self._write_finished:
|
||||
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
|
||||
if not skip_body:
|
||||
body_future = self._read_body(
|
||||
resp_start_line.code if self.is_client else 0, headers, delegate
|
||||
)
|
||||
if body_future is not None:
|
||||
if self._body_timeout is None:
|
||||
await body_future
|
||||
else:
|
||||
try:
|
||||
await gen.with_timeout(
|
||||
self.stream.io_loop.time() + self._body_timeout,
|
||||
body_future,
|
||||
quiet_exceptions=iostream.StreamClosedError,
|
||||
)
|
||||
except gen.TimeoutError:
|
||||
gen_log.info("Timeout reading body from %s", self.context)
|
||||
self.stream.close()
|
||||
return False
|
||||
self._read_finished = True
|
||||
if not self._write_finished or self.is_client:
|
||||
need_delegate_close = False
|
||||
with _ExceptionLoggingContext(app_log):
|
||||
delegate.finish()
|
||||
# If we're waiting for the application to produce an asynchronous
|
||||
# response, and we're not detached, register a close callback
|
||||
# on the stream (we didn't need one while we were reading)
|
||||
if (
|
||||
not self._finish_future.done()
|
||||
and self.stream is not None
|
||||
and not self.stream.closed()
|
||||
):
|
||||
self.stream.set_close_callback(self._on_connection_close)
|
||||
await self._finish_future
|
||||
if self.is_client and self._disconnect_on_finish:
|
||||
self.close()
|
||||
if self.stream is None:
|
||||
return False
|
||||
except httputil.HTTPInputError as e:
|
||||
gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
|
||||
if not self.is_client:
|
||||
await self.stream.write(b"HTTP/1.1 400 Bad Request\r\n\r\n")
|
||||
self.close()
|
||||
return False
|
||||
finally:
|
||||
if need_delegate_close:
|
||||
with _ExceptionLoggingContext(app_log):
|
||||
delegate.on_connection_close()
|
||||
header_future = None # type: ignore
|
||||
self._clear_callbacks()
|
||||
return True
|
||||
|
||||
def _clear_callbacks(self) -> None:
|
||||
"""Clears the callback attributes.
|
||||
|
||||
This allows the request handler to be garbage collected more
|
||||
quickly in CPython by breaking up reference cycles.
|
||||
"""
|
||||
self._write_callback = None
|
||||
self._write_future = None # type: Optional[Future[None]]
|
||||
self._close_callback = None # type: Optional[Callable[[], None]]
|
||||
if self.stream is not None:
|
||||
self.stream.set_close_callback(None)
|
||||
|
||||
def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None:
|
||||
"""Sets a callback that will be run when the connection is closed.
|
||||
|
||||
Note that this callback is slightly different from
|
||||
`.HTTPMessageDelegate.on_connection_close`: The
|
||||
`.HTTPMessageDelegate` method is called when the connection is
|
||||
closed while receiving a message. This callback is used when
|
||||
there is not an active delegate (for example, on the server
|
||||
side this callback is used if the client closes the connection
|
||||
after sending its request but before receiving all the
|
||||
response).
|
||||
"""
|
||||
self._close_callback = callback
|
||||
|
||||
def _on_connection_close(self) -> None:
|
||||
# Note that this callback is only registered on the IOStream
|
||||
# when we have finished reading the request and are waiting for
|
||||
# the application to produce its response.
|
||||
if self._close_callback is not None:
|
||||
callback = self._close_callback
|
||||
self._close_callback = None
|
||||
callback()
|
||||
if not self._finish_future.done():
|
||||
future_set_result_unless_cancelled(self._finish_future, None)
|
||||
self._clear_callbacks()
|
||||
|
||||
def close(self) -> None:
|
||||
if self.stream is not None:
|
||||
self.stream.close()
|
||||
self._clear_callbacks()
|
||||
if not self._finish_future.done():
|
||||
future_set_result_unless_cancelled(self._finish_future, None)
|
||||
|
||||
def detach(self) -> iostream.IOStream:
|
||||
"""Take control of the underlying stream.
|
||||
|
||||
Returns the underlying `.IOStream` object and stops all further
|
||||
HTTP processing. May only be called during
|
||||
`.HTTPMessageDelegate.headers_received`. Intended for implementing
|
||||
protocols like websockets that tunnel over an HTTP handshake.
|
||||
"""
|
||||
self._clear_callbacks()
|
||||
stream = self.stream
|
||||
self.stream = None # type: ignore
|
||||
if not self._finish_future.done():
|
||||
future_set_result_unless_cancelled(self._finish_future, None)
|
||||
return stream
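# Illustrative sketch (not part of Tornado): a delegate that calls
# detach() from headers_received to take over the socket for a raw
# protocol; the delegate wiring shown here is an assumption.
#
#     class UpgradeDelegate(httputil.HTTPMessageDelegate):
#         def __init__(self, conn):
#             self.conn = conn  # the HTTP1Connection serving this request
#
#         def headers_received(self, start_line, headers):
#             stream = self.conn.detach()  # HTTP processing stops here
#             stream.write(b"HTTP/1.1 101 Switching Protocols\r\n\r\n")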
|
||||
|
||||
def set_body_timeout(self, timeout: float) -> None:
|
||||
"""Sets the body timeout for a single request.
|
||||
|
||||
Overrides the value from `.HTTP1ConnectionParameters`.
|
||||
"""
|
||||
self._body_timeout = timeout
|
||||
|
||||
def set_max_body_size(self, max_body_size: int) -> None:
|
||||
"""Sets the body size limit for a single request.
|
||||
|
||||
Overrides the value from `.HTTP1ConnectionParameters`.
|
||||
"""
|
||||
self._max_body_size = max_body_size
|
||||
|
||||
def write_headers(
|
||||
self,
|
||||
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
|
||||
headers: httputil.HTTPHeaders,
|
||||
chunk: Optional[bytes] = None,
|
||||
) -> "Future[None]":
|
||||
"""Implements `.HTTPConnection.write_headers`."""
|
||||
lines = []
|
||||
if self.is_client:
|
||||
assert isinstance(start_line, httputil.RequestStartLine)
|
||||
self._request_start_line = start_line
|
||||
lines.append(utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1])))
|
||||
# Client requests with a non-empty body must have either a
|
||||
# Content-Length or a Transfer-Encoding.
|
||||
self._chunking_output = (
|
||||
start_line.method in ("POST", "PUT", "PATCH")
|
||||
and "Content-Length" not in headers
|
||||
and (
|
||||
"Transfer-Encoding" not in headers
|
||||
or headers["Transfer-Encoding"] == "chunked"
|
||||
)
|
||||
)
|
||||
else:
|
||||
assert isinstance(start_line, httputil.ResponseStartLine)
|
||||
assert self._request_start_line is not None
|
||||
assert self._request_headers is not None
|
||||
self._response_start_line = start_line
|
||||
lines.append(utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
|
||||
self._chunking_output = (
|
||||
# TODO: should this use
|
||||
# self._request_start_line.version or
|
||||
# start_line.version?
|
||||
self._request_start_line.version == "HTTP/1.1"
|
||||
# Omit payload header field for HEAD request.
|
||||
and self._request_start_line.method != "HEAD"
|
||||
# 1xx, 204 and 304 responses have no body (not even a zero-length
|
||||
# body), and so should not have either Content-Length or
|
||||
# Transfer-Encoding headers.
|
||||
and start_line.code not in (204, 304)
|
||||
and (start_line.code < 100 or start_line.code >= 200)
|
||||
# No need to chunk the output if a Content-Length is specified.
|
||||
and "Content-Length" not in headers
|
||||
# Applications are discouraged from touching Transfer-Encoding,
|
||||
# but if they do, leave it alone.
|
||||
and "Transfer-Encoding" not in headers
|
||||
)
|
||||
# If connection to a 1.1 client will be closed, inform client
|
||||
if (
|
||||
self._request_start_line.version == "HTTP/1.1"
|
||||
and self._disconnect_on_finish
|
||||
):
|
||||
headers["Connection"] = "close"
|
||||
# If a 1.0 client asked for keep-alive, add the header.
|
||||
if (
|
||||
self._request_start_line.version == "HTTP/1.0"
|
||||
and self._request_headers.get("Connection", "").lower() == "keep-alive"
|
||||
):
|
||||
headers["Connection"] = "Keep-Alive"
|
||||
if self._chunking_output:
|
||||
headers["Transfer-Encoding"] = "chunked"
|
||||
if not self.is_client and (
|
||||
self._request_start_line.method == "HEAD"
|
||||
or cast(httputil.ResponseStartLine, start_line).code == 304
|
||||
):
|
||||
self._expected_content_remaining = 0
|
||||
elif "Content-Length" in headers:
|
||||
self._expected_content_remaining = int(headers["Content-Length"])
|
||||
else:
|
||||
self._expected_content_remaining = None
|
||||
# TODO: headers are supposed to be of type str, but we still have some
|
||||
# cases that let bytes slip through. Remove these native_str calls when those
|
||||
# are fixed.
|
||||
header_lines = (
|
||||
native_str(n) + ": " + native_str(v) for n, v in headers.get_all()
|
||||
)
|
||||
lines.extend(line.encode("latin1") for line in header_lines)
|
||||
for line in lines:
|
||||
if b"\n" in line:
|
||||
raise ValueError("Newline in header: " + repr(line))
|
||||
future = None
|
||||
if self.stream.closed():
|
||||
future = self._write_future = Future()
|
||||
future.set_exception(iostream.StreamClosedError())
|
||||
future.exception()
|
||||
else:
|
||||
future = self._write_future = Future()
|
||||
data = b"\r\n".join(lines) + b"\r\n\r\n"
|
||||
if chunk:
|
||||
data += self._format_chunk(chunk)
|
||||
self._pending_write = self.stream.write(data)
|
||||
future_add_done_callback(self._pending_write, self._on_write_complete)
|
||||
return future
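# Example of the chunking decision above (server side): an HTTP/1.1
# request answered without a Content-Length or Transfer-Encoding header
# gets "Transfer-Encoding: chunked" added automatically, while responses
# to HTTP/1.0 requests never do, since 1.0 clients do not understand
# chunked framing and must instead read until the connection closes.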
|
||||
|
||||
def _format_chunk(self, chunk: bytes) -> bytes:
|
||||
if self._expected_content_remaining is not None:
|
||||
self._expected_content_remaining -= len(chunk)
|
||||
if self._expected_content_remaining < 0:
|
||||
# Close the stream now to stop further framing errors.
|
||||
self.stream.close()
|
||||
raise httputil.HTTPOutputError(
|
||||
"Tried to write more data than Content-Length"
|
||||
)
|
||||
if self._chunking_output and chunk:
|
||||
# Don't write out empty chunks because that means END-OF-STREAM
|
||||
# with chunked encoding
|
||||
return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
|
||||
else:
|
||||
return chunk
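# Worked example: with chunked output enabled, _format_chunk(b"hello")
# returns b"5\r\nhello\r\n" (length in hex, CRLF, payload, CRLF); the
# zero-length terminating chunk b"0\r\n\r\n" is written by finish().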
|
||||
|
||||
def write(self, chunk: bytes) -> "Future[None]":
|
||||
"""Implements `.HTTPConnection.write`.
|
||||
|
||||
For backwards compatibility it is allowed but deprecated to
|
||||
skip `write_headers` and instead call `write()` with a
|
||||
pre-encoded header block.
|
||||
"""
|
||||
future = None
|
||||
if self.stream.closed():
|
||||
future = self._write_future = Future()
|
||||
self._write_future.set_exception(iostream.StreamClosedError())
|
||||
self._write_future.exception()
|
||||
else:
|
||||
future = self._write_future = Future()
|
||||
self._pending_write = self.stream.write(self._format_chunk(chunk))
|
||||
future_add_done_callback(self._pending_write, self._on_write_complete)
|
||||
return future
|
||||
|
||||
def finish(self) -> None:
|
||||
"""Implements `.HTTPConnection.finish`."""
|
||||
if (
|
||||
self._expected_content_remaining is not None
|
||||
and self._expected_content_remaining != 0
|
||||
and not self.stream.closed()
|
||||
):
|
||||
self.stream.close()
|
||||
raise httputil.HTTPOutputError(
|
||||
"Tried to write %d bytes less than Content-Length"
|
||||
% self._expected_content_remaining
|
||||
)
|
||||
if self._chunking_output:
|
||||
if not self.stream.closed():
|
||||
self._pending_write = self.stream.write(b"0\r\n\r\n")
|
||||
self._pending_write.add_done_callback(self._on_write_complete)
|
||||
self._write_finished = True
|
||||
# If the app finished the request while we're still reading,
|
||||
# divert any remaining data away from the delegate and
|
||||
# close the connection when we're done sending our response.
|
||||
# Closing the connection is the only way to avoid reading the
|
||||
# whole input body.
|
||||
if not self._read_finished:
|
||||
self._disconnect_on_finish = True
|
||||
# No more data is coming, so instruct TCP to send any remaining
|
||||
# data immediately instead of waiting for a full packet or ack.
|
||||
self.stream.set_nodelay(True)
|
||||
if self._pending_write is None:
|
||||
self._finish_request(None)
|
||||
else:
|
||||
future_add_done_callback(self._pending_write, self._finish_request)
|
||||
|
||||
def _on_write_complete(self, future: "Future[None]") -> None:
|
||||
exc = future.exception()
|
||||
if exc is not None and not isinstance(exc, iostream.StreamClosedError):
|
||||
future.result()
|
||||
if self._write_callback is not None:
|
||||
callback = self._write_callback
|
||||
self._write_callback = None
|
||||
self.stream.io_loop.add_callback(callback)
|
||||
if self._write_future is not None:
|
||||
future = self._write_future
|
||||
self._write_future = None
|
||||
future_set_result_unless_cancelled(future, None)
|
||||
|
||||
def _can_keep_alive(
|
||||
self, start_line: httputil.RequestStartLine, headers: httputil.HTTPHeaders
|
||||
) -> bool:
|
||||
if self.params.no_keep_alive:
|
||||
return False
|
||||
connection_header = headers.get("Connection")
|
||||
if connection_header is not None:
|
||||
connection_header = connection_header.lower()
|
||||
if start_line.version == "HTTP/1.1":
|
||||
return connection_header != "close"
|
||||
elif (
|
||||
"Content-Length" in headers
|
||||
or headers.get("Transfer-Encoding", "").lower() == "chunked"
|
||||
or getattr(start_line, "method", None) in ("HEAD", "GET")
|
||||
):
|
||||
# start_line may be a request or response start line; only
|
||||
# the former has a method attribute.
|
||||
return connection_header == "keep-alive"
|
||||
return False
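# Example: an HTTP/1.1 request is reusable unless it sends
# "Connection: close"; an HTTP/1.0 request needs both
# "Connection: keep-alive" and self-delimiting framing (a Content-Length,
# chunked encoding, or a bodyless method such as GET or HEAD).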
|
||||
|
||||
def _finish_request(self, future: "Optional[Future[None]]") -> None:
|
||||
self._clear_callbacks()
|
||||
if not self.is_client and self._disconnect_on_finish:
|
||||
self.close()
|
||||
return
|
||||
# Turn Nagle's algorithm back on, leaving the stream in its
|
||||
# default state for the next request.
|
||||
self.stream.set_nodelay(False)
|
||||
if not self._finish_future.done():
|
||||
future_set_result_unless_cancelled(self._finish_future, None)
|
||||
|
||||
def _parse_headers(self, data: bytes) -> Tuple[str, httputil.HTTPHeaders]:
|
||||
# The lstrip removes newlines that some implementations sometimes
|
||||
# insert between messages of a reused connection. Per RFC 7230,
|
||||
# we SHOULD ignore at least one empty line before the request.
|
||||
# http://tools.ietf.org/html/rfc7230#section-3.5
|
||||
data_str = native_str(data.decode("latin1")).lstrip("\r\n")
|
||||
# RFC 7230 section 3.5 allows for both CRLF and bare LF.
|
||||
eol = data_str.find("\n")
|
||||
start_line = data_str[:eol].rstrip("\r")
|
||||
headers = httputil.HTTPHeaders.parse(data_str[eol:])
|
||||
return start_line, headers
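# Illustrative example: _parse_headers(b"GET / HTTP/1.1\r\nHost: x\r\n\r\n")
# returns ("GET / HTTP/1.1", <HTTPHeaders {"Host": "x"}>).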
|
||||
|
||||
def _read_body(
|
||||
self,
|
||||
code: int,
|
||||
headers: httputil.HTTPHeaders,
|
||||
delegate: httputil.HTTPMessageDelegate,
|
||||
) -> Optional[Awaitable[None]]:
|
||||
if "Content-Length" in headers:
|
||||
if "Transfer-Encoding" in headers:
|
||||
# Response cannot contain both Content-Length and
|
||||
# Transfer-Encoding headers.
|
||||
# http://tools.ietf.org/html/rfc7230#section-3.3.3
|
||||
raise httputil.HTTPInputError(
|
||||
"Response with both Transfer-Encoding and Content-Length"
|
||||
)
|
||||
if "," in headers["Content-Length"]:
|
||||
# Proxies sometimes cause Content-Length headers to get
|
||||
# duplicated. If all the values are identical then we can
|
||||
# use them but if they differ it's an error.
|
||||
pieces = re.split(r",\s*", headers["Content-Length"])
|
||||
if any(i != pieces[0] for i in pieces):
|
||||
raise httputil.HTTPInputError(
|
||||
"Multiple unequal Content-Lengths: %r"
|
||||
% headers["Content-Length"]
|
||||
)
|
||||
headers["Content-Length"] = pieces[0]
|
||||
|
||||
try:
|
||||
content_length = int(headers["Content-Length"]) # type: Optional[int]
|
||||
except ValueError:
|
||||
# Handles non-integer Content-Length value.
|
||||
raise httputil.HTTPInputError(
|
||||
"Only integer Content-Length is allowed: %s"
|
||||
% headers["Content-Length"]
|
||||
)
|
||||
|
||||
if cast(int, content_length) > self._max_body_size:
|
||||
raise httputil.HTTPInputError("Content-Length too long")
|
||||
else:
|
||||
content_length = None
|
||||
|
||||
if code == 204:
|
||||
# This response code is not allowed to have a non-empty body,
|
||||
# and has an implicit length of zero instead of read-until-close.
|
||||
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
|
||||
if "Transfer-Encoding" in headers or content_length not in (None, 0):
|
||||
raise httputil.HTTPInputError(
|
||||
"Response with code %d should not have body" % code
|
||||
)
|
||||
content_length = 0
|
||||
|
||||
if content_length is not None:
|
||||
return self._read_fixed_body(content_length, delegate)
|
||||
if headers.get("Transfer-Encoding", "").lower() == "chunked":
|
||||
return self._read_chunked_body(delegate)
|
||||
if self.is_client:
|
||||
return self._read_body_until_close(delegate)
|
||||
return None
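# Example of the duplicate-header tolerance above: a proxied request
# carrying "Content-Length: 42, 42" is normalized to 42, while
# "Content-Length: 42, 17" raises HTTPInputError because the copies
# disagree.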
|
||||
|
||||
async def _read_fixed_body(
|
||||
self, content_length: int, delegate: httputil.HTTPMessageDelegate
|
||||
) -> None:
|
||||
while content_length > 0:
|
||||
body = await self.stream.read_bytes(
|
||||
min(self.params.chunk_size, content_length), partial=True
|
||||
)
|
||||
content_length -= len(body)
|
||||
if not self._write_finished or self.is_client:
|
||||
with _ExceptionLoggingContext(app_log):
|
||||
ret = delegate.data_received(body)
|
||||
if ret is not None:
|
||||
await ret
|
||||
|
||||
async def _read_chunked_body(self, delegate: httputil.HTTPMessageDelegate) -> None:
|
||||
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
|
||||
total_size = 0
|
||||
while True:
|
||||
chunk_len_str = await self.stream.read_until(b"\r\n", max_bytes=64)
|
||||
chunk_len = int(chunk_len_str.strip(), 16)
|
||||
if chunk_len == 0:
|
||||
crlf = await self.stream.read_bytes(2)
|
||||
if crlf != b"\r\n":
|
||||
raise httputil.HTTPInputError(
|
||||
"improperly terminated chunked request"
|
||||
)
|
||||
return
|
||||
total_size += chunk_len
|
||||
if total_size > self._max_body_size:
|
||||
raise httputil.HTTPInputError("chunked body too large")
|
||||
bytes_to_read = chunk_len
|
||||
while bytes_to_read:
|
||||
chunk = await self.stream.read_bytes(
|
||||
min(bytes_to_read, self.params.chunk_size), partial=True
|
||||
)
|
||||
bytes_to_read -= len(chunk)
|
||||
if not self._write_finished or self.is_client:
|
||||
with _ExceptionLoggingContext(app_log):
|
||||
ret = delegate.data_received(chunk)
|
||||
if ret is not None:
|
||||
await ret
|
||||
# chunk ends with \r\n
|
||||
crlf = await self.stream.read_bytes(2)
|
||||
assert crlf == b"\r\n"
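# Wire-format example for the loop above: a body of
#     b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
# reaches the delegate as b"Wiki" then b"pedia" (possibly split further
# by chunk_size), and the zero-length chunk terminates the message.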
|
||||
|
||||
async def _read_body_until_close(
|
||||
self, delegate: httputil.HTTPMessageDelegate
|
||||
) -> None:
|
||||
body = await self.stream.read_until_close()
|
||||
if not self._write_finished or self.is_client:
|
||||
with _ExceptionLoggingContext(app_log):
|
||||
ret = delegate.data_received(body)
|
||||
if ret is not None:
|
||||
await ret
|
||||
|
||||
|
||||
class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
|
||||
"""Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
|
||||
"""
|
||||
|
||||
def __init__(self, delegate: httputil.HTTPMessageDelegate, chunk_size: int) -> None:
|
||||
self._delegate = delegate
|
||||
self._chunk_size = chunk_size
|
||||
self._decompressor = None # type: Optional[GzipDecompressor]
|
||||
|
||||
def headers_received(
|
||||
self,
|
||||
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
|
||||
headers: httputil.HTTPHeaders,
|
||||
) -> Optional[Awaitable[None]]:
|
||||
if headers.get("Content-Encoding") == "gzip":
|
||||
self._decompressor = GzipDecompressor()
|
||||
# Downstream delegates will only see uncompressed data,
|
||||
# so rename the content-encoding header.
|
||||
# (but note that curl_httpclient doesn't do this).
|
||||
headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"])
|
||||
del headers["Content-Encoding"]
|
||||
return self._delegate.headers_received(start_line, headers)
|
||||
|
||||
async def data_received(self, chunk: bytes) -> None:
|
||||
if self._decompressor:
|
||||
compressed_data = chunk
|
||||
while compressed_data:
|
||||
decompressed = self._decompressor.decompress(
|
||||
compressed_data, self._chunk_size
|
||||
)
|
||||
if decompressed:
|
||||
ret = self._delegate.data_received(decompressed)
|
||||
if ret is not None:
|
||||
await ret
|
||||
compressed_data = self._decompressor.unconsumed_tail
|
||||
if compressed_data and not decompressed:
|
||||
raise httputil.HTTPInputError(
|
||||
"encountered unconsumed gzip data without making progress"
|
||||
)
|
||||
else:
|
||||
ret = self._delegate.data_received(chunk)
|
||||
if ret is not None:
|
||||
await ret
|
||||
|
||||
def finish(self) -> None:
|
||||
if self._decompressor is not None:
|
||||
tail = self._decompressor.flush()
|
||||
if tail:
|
||||
# The tail should always be empty: decompress returned
|
||||
# all that it can in data_received and the only
|
||||
# purpose of the flush call is to detect errors such
|
||||
# as truncated input. If we did legitimately get a new
|
||||
# chunk at this point we'd need to change the
|
||||
# interface to make finish() a coroutine.
|
||||
raise ValueError(
|
||||
"decompressor.flush returned data; possible truncated input"
|
||||
)
|
||||
return self._delegate.finish()
|
||||
|
||||
def on_connection_close(self) -> None:
|
||||
return self._delegate.on_connection_close()
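# Usage sketch (assumed wiring; HTTP1Connection normally does this
# internally when decompression is enabled in its parameters):
#
#     wrapped = _GzipMessageDelegate(app_delegate, chunk_size=65536)
#     await conn.read_response(wrapped)  # delegate sees decompressed bytes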
|
||||
|
||||
|
||||
class HTTP1ServerConnection(object):
|
||||
"""An HTTP/1.x server."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
stream: iostream.IOStream,
|
||||
params: Optional[HTTP1ConnectionParameters] = None,
|
||||
context: Optional[object] = None,
|
||||
) -> None:
|
||||
"""
|
||||
:arg stream: an `.IOStream`
|
||||
:arg params: a `.HTTP1ConnectionParameters` or None
|
||||
:arg context: an opaque application-defined object that is accessible
|
||||
as ``connection.context``
|
||||
"""
|
||||
self.stream = stream
|
||||
if params is None:
|
||||
params = HTTP1ConnectionParameters()
|
||||
self.params = params
|
||||
self.context = context
|
||||
self._serving_future = None # type: Optional[Future[None]]
|
||||
|
||||
async def close(self) -> None:
|
||||
"""Closes the connection.
|
||||
|
||||
Returns a `.Future` that resolves after the serving loop has exited.
|
||||
"""
|
||||
self.stream.close()
|
||||
# Block until the serving loop is done, but ignore any exceptions
|
||||
# (start_serving is already responsible for logging them).
|
||||
assert self._serving_future is not None
|
||||
try:
|
||||
await self._serving_future
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def start_serving(self, delegate: httputil.HTTPServerConnectionDelegate) -> None:
|
||||
"""Starts serving requests on this connection.
|
||||
|
||||
:arg delegate: a `.HTTPServerConnectionDelegate`
|
||||
"""
|
||||
assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
|
||||
fut = gen.convert_yielded(self._server_request_loop(delegate))
|
||||
self._serving_future = fut
|
||||
# Register the future on the IOLoop so its errors get logged.
|
||||
self.stream.io_loop.add_future(fut, lambda f: f.result())
|
||||
|
||||
async def _server_request_loop(
|
||||
self, delegate: httputil.HTTPServerConnectionDelegate
|
||||
) -> None:
|
||||
try:
|
||||
while True:
|
||||
conn = HTTP1Connection(self.stream, False, self.params, self.context)
|
||||
request_delegate = delegate.start_request(self, conn)
|
||||
try:
|
||||
ret = await conn.read_response(request_delegate)
|
||||
except (
|
||||
iostream.StreamClosedError,
|
||||
iostream.UnsatisfiableReadError,
|
||||
asyncio.CancelledError,
|
||||
):
|
||||
return
|
||||
except _QuietException:
|
||||
# This exception was already logged.
|
||||
conn.close()
|
||||
return
|
||||
except Exception:
|
||||
gen_log.error("Uncaught exception", exc_info=True)
|
||||
conn.close()
|
||||
return
|
||||
if not ret:
|
||||
return
|
||||
await asyncio.sleep(0)
|
||||
finally:
|
||||
delegate.on_close(self)
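# Usage sketch (assumed setup): serving one accepted stream with any
# httputil.HTTPServerConnectionDelegate, e.g. a tornado.web.Application:
#
#     conn = HTTP1ServerConnection(stream, HTTP1ConnectionParameters())
#     conn.start_serving(application)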
|
790
venv/Lib/site-packages/tornado/httpclient.py
Normal file
@@ -0,0 +1,790 @@
"""Blocking and non-blocking HTTP client interfaces.
|
||||
|
||||
This module defines a common interface shared by two implementations,
|
||||
``simple_httpclient`` and ``curl_httpclient``. Applications may either
|
||||
instantiate their chosen implementation class directly or use the
|
||||
`AsyncHTTPClient` class from this module, which selects an implementation
|
||||
that can be overridden with the `AsyncHTTPClient.configure` method.
|
||||
|
||||
The default implementation is ``simple_httpclient``, and this is expected
|
||||
to be suitable for most users' needs. However, some applications may wish
|
||||
to switch to ``curl_httpclient`` for reasons such as the following:
|
||||
|
||||
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
|
||||
including support for HTTP proxies and the ability to use a specified
|
||||
network interface.
|
||||
|
||||
* ``curl_httpclient`` is more likely to be compatible with sites that are
|
||||
not-quite-compliant with the HTTP spec, or sites that use little-exercised
|
||||
features of HTTP.
|
||||
|
||||
* ``curl_httpclient`` is faster.
|
||||
|
||||
Note that if you are using ``curl_httpclient``, it is highly
|
||||
recommended that you use a recent version of ``libcurl`` and
|
||||
``pycurl``. Currently the minimum supported version of libcurl is
|
||||
7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
|
||||
recommended that your ``libcurl`` installation is built with
|
||||
asynchronous DNS resolver (threaded or c-ares), otherwise you may
|
||||
encounter various problems with request timeouts (for more
|
||||
information, see
|
||||
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
|
||||
and comments in curl_httpclient.py).
|
||||
|
||||
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
|
||||
|
||||
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import functools
|
||||
from io import BytesIO
|
||||
import ssl
|
||||
import time
|
||||
import weakref
|
||||
|
||||
from tornado.concurrent import (
|
||||
Future,
|
||||
future_set_result_unless_cancelled,
|
||||
future_set_exception_unless_cancelled,
|
||||
)
|
||||
from tornado.escape import utf8, native_str
|
||||
from tornado import gen, httputil
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.util import Configurable
|
||||
|
||||
from typing import Type, Any, Union, Dict, Callable, Optional, cast
|
||||
|
||||
|
||||
class HTTPClient(object):
|
||||
"""A blocking HTTP client.
|
||||
|
||||
This interface is provided to make it easier to share code between
|
||||
synchronous and asynchronous applications. Applications that are
|
||||
running an `.IOLoop` must use `AsyncHTTPClient` instead.
|
||||
|
||||
Typical usage looks like this::
|
||||
|
||||
http_client = httpclient.HTTPClient()
|
||||
try:
|
||||
response = http_client.fetch("http://www.google.com/")
|
||||
print(response.body)
|
||||
except httpclient.HTTPError as e:
|
||||
# HTTPError is raised for non-200 responses; the response
|
||||
# can be found in e.response.
|
||||
print("Error: " + str(e))
|
||||
except Exception as e:
|
||||
# Other errors are possible, such as IOError.
|
||||
print("Error: " + str(e))
|
||||
http_client.close()
|
||||
|
||||
.. versionchanged:: 5.0
|
||||
|
||||
Due to limitations in `asyncio`, it is no longer possible to
|
||||
use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
|
||||
Use `AsyncHTTPClient` instead.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
async_client_class: "Optional[Type[AsyncHTTPClient]]" = None,
|
||||
**kwargs: Any
|
||||
) -> None:
|
||||
# Initialize self._closed at the beginning of the constructor
|
||||
# so that an exception raised here doesn't lead to confusing
|
||||
# failures in __del__.
|
||||
self._closed = True
|
||||
self._io_loop = IOLoop(make_current=False)
|
||||
if async_client_class is None:
|
||||
async_client_class = AsyncHTTPClient
|
||||
|
||||
# Create the client while our IOLoop is "current", without
|
||||
# clobbering the thread's real current IOLoop (if any).
|
||||
async def make_client() -> "AsyncHTTPClient":
|
||||
await gen.sleep(0)
|
||||
assert async_client_class is not None
|
||||
return async_client_class(**kwargs)
|
||||
|
||||
self._async_client = self._io_loop.run_sync(make_client)
|
||||
self._closed = False
|
||||
|
||||
def __del__(self) -> None:
|
||||
self.close()
|
||||
|
||||
def close(self) -> None:
|
||||
"""Closes the HTTPClient, freeing any resources used."""
|
||||
if not self._closed:
|
||||
self._async_client.close()
|
||||
self._io_loop.close()
|
||||
self._closed = True
|
||||
|
||||
def fetch(
|
||||
self, request: Union["HTTPRequest", str], **kwargs: Any
|
||||
) -> "HTTPResponse":
|
||||
"""Executes a request, returning an `HTTPResponse`.
|
||||
|
||||
The request may be either a string URL or an `HTTPRequest` object.
|
||||
If it is a string, we construct an `HTTPRequest` using any additional
|
||||
kwargs: ``HTTPRequest(request, **kwargs)``
|
||||
|
||||
If an error occurs during the fetch, we raise an `HTTPError` unless
|
||||
the ``raise_error`` keyword argument is set to False.
|
||||
"""
|
||||
response = self._io_loop.run_sync(
|
||||
functools.partial(self._async_client.fetch, request, **kwargs)
|
||||
)
|
||||
return response
|
||||
|
||||
|
||||
class AsyncHTTPClient(Configurable):
|
||||
"""An non-blocking HTTP client.
|
||||
|
||||
Example usage::
|
||||
|
||||
async def f():
|
||||
http_client = AsyncHTTPClient()
|
||||
try:
|
||||
response = await http_client.fetch("http://www.google.com")
|
||||
except Exception as e:
|
||||
print("Error: %s" % e)
|
||||
else:
|
||||
print(response.body)
|
||||
|
||||
The constructor for this class is magic in several respects: It
|
||||
actually creates an instance of an implementation-specific
|
||||
subclass, and instances are reused as a kind of pseudo-singleton
|
||||
(one per `.IOLoop`). The keyword argument ``force_instance=True``
|
||||
can be used to suppress this singleton behavior. Unless
|
||||
``force_instance=True`` is used, no arguments should be passed to
|
||||
the `AsyncHTTPClient` constructor. The implementation subclass as
|
||||
well as arguments to its constructor can be set with the static
|
||||
method `configure()`.
|
||||
|
||||
All `AsyncHTTPClient` implementations support a ``defaults``
|
||||
keyword argument, which can be used to set default values for
|
||||
`HTTPRequest` attributes. For example::
|
||||
|
||||
AsyncHTTPClient.configure(
|
||||
None, defaults=dict(user_agent="MyUserAgent"))
|
||||
# or with force_instance:
|
||||
client = AsyncHTTPClient(force_instance=True,
|
||||
defaults=dict(user_agent="MyUserAgent"))
|
||||
|
||||
.. versionchanged:: 5.0
|
||||
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
|
||||
|
||||
"""
|
||||
|
||||
_instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient]
|
||||
|
||||
@classmethod
|
||||
def configurable_base(cls) -> Type[Configurable]:
|
||||
return AsyncHTTPClient
|
||||
|
||||
@classmethod
|
||||
def configurable_default(cls) -> Type[Configurable]:
|
||||
from tornado.simple_httpclient import SimpleAsyncHTTPClient
|
||||
|
||||
return SimpleAsyncHTTPClient
|
||||
|
||||
@classmethod
|
||||
def _async_clients(cls) -> Dict[IOLoop, "AsyncHTTPClient"]:
|
||||
attr_name = "_async_client_dict_" + cls.__name__
|
||||
if not hasattr(cls, attr_name):
|
||||
setattr(cls, attr_name, weakref.WeakKeyDictionary())
|
||||
return getattr(cls, attr_name)
|
||||
|
||||
def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient":
|
||||
io_loop = IOLoop.current()
|
||||
if force_instance:
|
||||
instance_cache = None
|
||||
else:
|
||||
instance_cache = cls._async_clients()
|
||||
if instance_cache is not None and io_loop in instance_cache:
|
||||
return instance_cache[io_loop]
|
||||
instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # type: ignore
|
||||
# Make sure the instance knows which cache to remove itself from.
|
||||
# It can't simply call _async_clients() because we may be in
|
||||
# __new__(AsyncHTTPClient) but instance.__class__ may be
|
||||
# SimpleAsyncHTTPClient.
|
||||
instance._instance_cache = instance_cache
|
||||
if instance_cache is not None:
|
||||
instance_cache[instance.io_loop] = instance
|
||||
return instance
|
||||
|
||||
def initialize(self, defaults: Optional[Dict[str, Any]] = None) -> None:
|
||||
self.io_loop = IOLoop.current()
|
||||
self.defaults = dict(HTTPRequest._DEFAULTS)
|
||||
if defaults is not None:
|
||||
self.defaults.update(defaults)
|
||||
self._closed = False
|
||||
|
||||
def close(self) -> None:
|
||||
"""Destroys this HTTP client, freeing any file descriptors used.
|
||||
|
||||
This method is **not needed in normal use** due to the way
|
||||
that `AsyncHTTPClient` objects are transparently reused.
|
||||
``close()`` is generally only necessary when either the
|
||||
`.IOLoop` is also being closed, or the ``force_instance=True``
|
||||
argument was used when creating the `AsyncHTTPClient`.
|
||||
|
||||
No other methods may be called on the `AsyncHTTPClient` after
|
||||
``close()``.
|
||||
|
||||
"""
|
||||
if self._closed:
|
||||
return
|
||||
self._closed = True
|
||||
if self._instance_cache is not None:
|
||||
cached_val = self._instance_cache.pop(self.io_loop, None)
|
||||
# If there's an object other than self in the instance
|
||||
# cache for our IOLoop, something has gotten mixed up. A
|
||||
# value of None appears to be possible when this is called
|
||||
# from a destructor (HTTPClient.__del__) as the weakref
|
||||
# gets cleared before the destructor runs.
|
||||
if cached_val is not None and cached_val is not self:
|
||||
raise RuntimeError("inconsistent AsyncHTTPClient cache")
|
||||
|
||||
def fetch(
|
||||
self,
|
||||
request: Union[str, "HTTPRequest"],
|
||||
raise_error: bool = True,
|
||||
**kwargs: Any
|
||||
) -> "Future[HTTPResponse]":
|
||||
"""Executes a request, asynchronously returning an `HTTPResponse`.
|
||||
|
||||
The request may be either a string URL or an `HTTPRequest` object.
|
||||
If it is a string, we construct an `HTTPRequest` using any additional
|
||||
kwargs: ``HTTPRequest(request, **kwargs)``
|
||||
|
||||
This method returns a `.Future` whose result is an
|
||||
`HTTPResponse`. By default, the ``Future`` will raise an
|
||||
`HTTPError` if the request returned a non-200 response code
|
||||
(other errors may also be raised if the server could not be
|
||||
contacted). Instead, if ``raise_error`` is set to False, the
|
||||
response will always be returned regardless of the response
|
||||
code.
|
||||
|
||||
If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
|
||||
In the callback interface, `HTTPError` is not automatically raised.
|
||||
Instead, you must check the response's ``error`` attribute or
|
||||
call its `~HTTPResponse.rethrow` method.
|
||||
|
||||
.. versionchanged:: 6.0
|
||||
|
||||
The ``callback`` argument was removed. Use the returned
|
||||
`.Future` instead.
|
||||
|
||||
The ``raise_error=False`` argument only affects the
|
||||
`HTTPError` raised when a non-200 response code is used,
|
||||
instead of suppressing all errors.
|
||||
"""
|
||||
if self._closed:
|
||||
raise RuntimeError("fetch() called on closed AsyncHTTPClient")
|
||||
if not isinstance(request, HTTPRequest):
|
||||
request = HTTPRequest(url=request, **kwargs)
|
||||
else:
|
||||
if kwargs:
|
||||
raise ValueError(
|
||||
"kwargs can't be used if request is an HTTPRequest object"
|
||||
)
|
||||
# We may modify this (to add Host, Accept-Encoding, etc),
|
||||
# so make sure we don't modify the caller's object. This is also
|
||||
# where normal dicts get converted to HTTPHeaders objects.
|
||||
request.headers = httputil.HTTPHeaders(request.headers)
|
||||
request_proxy = _RequestProxy(request, self.defaults)
|
||||
future = Future() # type: Future[HTTPResponse]
|
||||
|
||||
def handle_response(response: "HTTPResponse") -> None:
|
||||
if response.error:
|
||||
if raise_error or not response._error_is_response_code:
|
||||
future_set_exception_unless_cancelled(future, response.error)
|
||||
return
|
||||
future_set_result_unless_cancelled(future, response)
|
||||
|
||||
self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)
|
||||
return future
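# Example of raise_error=False (illustrative): inspect HTTP-level errors
# via the response object instead of try/except. Network failures (e.g.
# connection refused) still raise regardless of this flag.
#
#     resp = await AsyncHTTPClient().fetch(url, raise_error=False)
#     if resp.code == 404:
#         handle_missing()  # hypothetical application helper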
|
||||
|
||||
def fetch_impl(
|
||||
self, request: "HTTPRequest", callback: Callable[["HTTPResponse"], None]
|
||||
) -> None:
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def configure(
|
||||
cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
|
||||
) -> None:
|
||||
"""Configures the `AsyncHTTPClient` subclass to use.
|
||||
|
||||
``AsyncHTTPClient()`` actually creates an instance of a subclass.
|
||||
This method may be called with either a class object or the
|
||||
fully-qualified name of such a class (or ``None`` to use the default,
|
||||
``SimpleAsyncHTTPClient``).
|
||||
|
||||
If additional keyword arguments are given, they will be passed
|
||||
to the constructor of each subclass instance created. The
|
||||
keyword argument ``max_clients`` determines the maximum number
|
||||
of simultaneous `~AsyncHTTPClient.fetch()` operations that can
|
||||
execute in parallel on each `.IOLoop`. Additional arguments
|
||||
may be supported depending on the implementation class in use.
|
||||
|
||||
Example::
|
||||
|
||||
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
|
||||
"""
|
||||
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
|
||||
|
||||
|
||||
class HTTPRequest(object):
|
||||
"""HTTP client request object."""
|
||||
|
||||
_headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders]
|
||||
|
||||
# Default values for HTTPRequest parameters.
|
||||
# Merged with the values on the request object by AsyncHTTPClient
|
||||
# implementations.
|
||||
_DEFAULTS = dict(
|
||||
connect_timeout=20.0,
|
||||
request_timeout=20.0,
|
||||
follow_redirects=True,
|
||||
max_redirects=5,
|
||||
decompress_response=True,
|
||||
proxy_password="",
|
||||
allow_nonstandard_methods=False,
|
||||
validate_cert=True,
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
url: str,
|
||||
method: str = "GET",
|
||||
headers: Optional[Union[Dict[str, str], httputil.HTTPHeaders]] = None,
|
||||
body: Optional[Union[bytes, str]] = None,
|
||||
auth_username: Optional[str] = None,
|
||||
auth_password: Optional[str] = None,
|
||||
auth_mode: Optional[str] = None,
|
||||
connect_timeout: Optional[float] = None,
|
||||
request_timeout: Optional[float] = None,
|
||||
if_modified_since: Optional[Union[float, datetime.datetime]] = None,
|
||||
follow_redirects: Optional[bool] = None,
|
||||
max_redirects: Optional[int] = None,
|
||||
user_agent: Optional[str] = None,
|
||||
use_gzip: Optional[bool] = None,
|
||||
network_interface: Optional[str] = None,
|
||||
streaming_callback: Optional[Callable[[bytes], None]] = None,
|
||||
header_callback: Optional[Callable[[str], None]] = None,
|
||||
prepare_curl_callback: Optional[Callable[[Any], None]] = None,
|
||||
proxy_host: Optional[str] = None,
|
||||
proxy_port: Optional[int] = None,
|
||||
proxy_username: Optional[str] = None,
|
||||
proxy_password: Optional[str] = None,
|
||||
proxy_auth_mode: Optional[str] = None,
|
||||
allow_nonstandard_methods: Optional[bool] = None,
|
||||
validate_cert: Optional[bool] = None,
|
||||
ca_certs: Optional[str] = None,
|
||||
allow_ipv6: Optional[bool] = None,
|
||||
client_key: Optional[str] = None,
|
||||
client_cert: Optional[str] = None,
|
||||
body_producer: Optional[
|
||||
Callable[[Callable[[bytes], None]], "Future[None]"]
|
||||
] = None,
|
||||
expect_100_continue: bool = False,
|
||||
decompress_response: Optional[bool] = None,
|
||||
ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
|
||||
) -> None:
|
||||
r"""All parameters except ``url`` are optional.
|
||||
|
||||
:arg str url: URL to fetch
|
||||
:arg str method: HTTP method, e.g. "GET" or "POST"
|
||||
:arg headers: Additional HTTP headers to pass on the request
|
||||
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
|
||||
:arg body: HTTP request body as a string (byte or unicode; if unicode
|
||||
the utf-8 encoding will be used)
|
||||
:type body: `str` or `bytes`
|
||||
:arg collections.abc.Callable body_producer: Callable used for
|
||||
lazy/asynchronous request bodies.
|
||||
It is called with one argument, a ``write`` function, and should
|
||||
return a `.Future`. It should call the write function with new
|
||||
data as it becomes available. The write function returns a
|
||||
`.Future` which can be used for flow control.
|
||||
Only one of ``body`` and ``body_producer`` may
|
||||
be specified. ``body_producer`` is not supported on
|
||||
``curl_httpclient``. When using ``body_producer`` it is recommended
|
||||
to pass a ``Content-Length`` in the headers as otherwise chunked
|
||||
encoding will be used, and many servers do not support chunked
|
||||
encoding on requests. New in Tornado 4.0.
|
||||
:arg str auth_username: Username for HTTP authentication
|
||||
:arg str auth_password: Password for HTTP authentication
|
||||
:arg str auth_mode: Authentication mode; default is "basic".
|
||||
Allowed values are implementation-defined; ``curl_httpclient``
|
||||
supports "basic" and "digest"; ``simple_httpclient`` only supports
|
||||
"basic"
|
||||
:arg float connect_timeout: Timeout for initial connection in seconds,
|
||||
default 20 seconds (0 means no timeout)
|
||||
:arg float request_timeout: Timeout for entire request in seconds,
|
||||
default 20 seconds (0 means no timeout)
|
||||
:arg if_modified_since: Timestamp for ``If-Modified-Since`` header
|
||||
:type if_modified_since: `datetime` or `float`
|
||||
:arg bool follow_redirects: Should redirects be followed automatically
|
||||
or return the 3xx response? Default True.
|
||||
:arg int max_redirects: Limit for ``follow_redirects``, default 5.
|
||||
:arg str user_agent: String to send as ``User-Agent`` header
|
||||
:arg bool decompress_response: Request a compressed response from
|
||||
the server and decompress it after downloading. Default is True.
|
||||
New in Tornado 4.0.
|
||||
:arg bool use_gzip: Deprecated alias for ``decompress_response``
|
||||
since Tornado 4.0.
|
||||
:arg str network_interface: Network interface or source IP to use for request.
|
||||
See ``curl_httpclient`` note below.
|
||||
:arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
|
||||
be run with each chunk of data as it is received, and
|
||||
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
|
||||
the final response.
|
||||
:arg collections.abc.Callable header_callback: If set, ``header_callback`` will
|
||||
be run with each header line as it is received (including the
|
||||
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
|
||||
containing only ``\r\n``. All lines include the trailing newline
|
||||
characters). ``HTTPResponse.headers`` will be empty in the final
|
||||
response. This is most useful in conjunction with
|
||||
``streaming_callback``, because it's the only way to get access to
|
||||
header data while the request is in progress.
|
||||
:arg collections.abc.Callable prepare_curl_callback: If set, will be called with
|
||||
a ``pycurl.Curl`` object to allow the application to make additional
|
||||
``setopt`` calls.
|
||||
:arg str proxy_host: HTTP proxy hostname. To use proxies,
|
||||
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
|
||||
``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies are
|
||||
currently only supported with ``curl_httpclient``.
|
||||
:arg int proxy_port: HTTP proxy port
|
||||
:arg str proxy_username: HTTP proxy username
|
||||
:arg str proxy_password: HTTP proxy password
|
||||
:arg str proxy_auth_mode: HTTP proxy Authentication mode;
|
||||
default is "basic". supports "basic" and "digest"
|
||||
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
|
||||
argument? Default is False.
|
||||
:arg bool validate_cert: For HTTPS requests, validate the server's
|
||||
certificate? Default is True.
|
||||
:arg str ca_certs: filename of CA certificates in PEM format,
|
||||
or None to use defaults. See note below when used with
|
||||
``curl_httpclient``.
|
||||
:arg str client_key: Filename for client SSL key, if any. See
|
||||
note below when used with ``curl_httpclient``.
|
||||
:arg str client_cert: Filename for client SSL certificate, if any.
|
||||
See note below when used with ``curl_httpclient``.
|
||||
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
|
||||
``simple_httpclient`` (unsupported by ``curl_httpclient``).
|
||||
Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
|
||||
and ``client_cert``.
|
||||
:arg bool allow_ipv6: Use IPv6 when available? Default is True.
|
||||
:arg bool expect_100_continue: If true, send the
|
||||
``Expect: 100-continue`` header and wait for a continue response
|
||||
before sending the request body. Only supported with
|
||||
``simple_httpclient``.
|
||||
|
||||
.. note::
|
||||
|
||||
When using ``curl_httpclient`` certain options may be
|
||||
inherited by subsequent fetches because ``pycurl`` does
|
||||
not allow them to be cleanly reset. This applies to the
|
||||
``ca_certs``, ``client_key``, ``client_cert``, and
|
||||
``network_interface`` arguments. If you use these
|
||||
options, you should pass them on every request (you don't
|
||||
have to always use the same values, but it's not possible
|
||||
to mix requests that specify these options with ones that
|
||||
use the defaults).
|
||||
|
||||
.. versionadded:: 3.1
|
||||
The ``auth_mode`` argument.
|
||||
|
||||
.. versionadded:: 4.0
|
||||
The ``body_producer`` and ``expect_100_continue`` arguments.
|
||||
|
||||
.. versionadded:: 4.2
|
||||
The ``ssl_options`` argument.
|
||||
|
||||
.. versionadded:: 4.5
|
||||
The ``proxy_auth_mode`` argument.
|
||||
"""
|
||||
# Note that some of these attributes go through property setters
|
||||
# defined below.
|
||||
self.headers = headers # type: ignore
|
||||
if if_modified_since:
|
||||
self.headers["If-Modified-Since"] = httputil.format_timestamp(
|
||||
if_modified_since
|
||||
)
|
||||
self.proxy_host = proxy_host
|
||||
self.proxy_port = proxy_port
|
||||
self.proxy_username = proxy_username
|
||||
self.proxy_password = proxy_password
|
||||
self.proxy_auth_mode = proxy_auth_mode
|
||||
self.url = url
|
||||
self.method = method
|
||||
self.body = body # type: ignore
|
||||
self.body_producer = body_producer
|
||||
self.auth_username = auth_username
|
||||
self.auth_password = auth_password
|
||||
self.auth_mode = auth_mode
|
||||
self.connect_timeout = connect_timeout
|
||||
self.request_timeout = request_timeout
|
||||
self.follow_redirects = follow_redirects
|
||||
self.max_redirects = max_redirects
|
||||
self.user_agent = user_agent
|
||||
if decompress_response is not None:
|
||||
self.decompress_response = decompress_response # type: Optional[bool]
|
||||
else:
|
||||
self.decompress_response = use_gzip
|
||||
self.network_interface = network_interface
|
||||
self.streaming_callback = streaming_callback
|
||||
self.header_callback = header_callback
|
||||
self.prepare_curl_callback = prepare_curl_callback
|
||||
self.allow_nonstandard_methods = allow_nonstandard_methods
|
||||
self.validate_cert = validate_cert
|
||||
self.ca_certs = ca_certs
|
||||
self.allow_ipv6 = allow_ipv6
|
||||
self.client_key = client_key
|
||||
self.client_cert = client_cert
|
||||
self.ssl_options = ssl_options
|
||||
self.expect_100_continue = expect_100_continue
|
||||
self.start_time = time.time()
|
||||
|
||||
@property
|
||||
def headers(self) -> httputil.HTTPHeaders:
|
||||
# TODO: headers may actually be a plain dict until fairly late in
|
||||
# the process (AsyncHTTPClient.fetch), but practically speaking,
|
||||
# whenever the property is used they're already HTTPHeaders.
|
||||
return self._headers # type: ignore
|
||||
|
||||
@headers.setter
|
||||
def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None:
|
||||
if value is None:
|
||||
self._headers = httputil.HTTPHeaders()
|
||||
else:
|
||||
self._headers = value # type: ignore
|
||||
|
||||
@property
|
||||
def body(self) -> bytes:
|
||||
return self._body
|
||||
|
||||
@body.setter
|
||||
def body(self, value: Union[bytes, str]) -> None:
|
||||
self._body = utf8(value)
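# Illustrative body_producer sketch (names assumed): stream a request
# body without buffering it all in memory. Setting Content-Length avoids
# chunked encoding, which many servers reject on requests.
#
#     async def produce(write):
#         for piece in (b"hello, ", b"world"):
#             await write(piece)
#
#     req = HTTPRequest("http://example.com/upload", method="POST",
#                       body_producer=produce,
#                       headers={"Content-Length": "12"})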
|
||||
|
||||
|
||||
class HTTPResponse(object):
|
||||
"""HTTP Response object.
|
||||
|
||||
Attributes:
|
||||
|
||||
* ``request``: HTTPRequest object
|
||||
|
||||
* ``code``: numeric HTTP status code, e.g. 200 or 404
|
||||
|
||||
* ``reason``: human-readable reason phrase describing the status code
|
||||
|
||||
* ``headers``: `tornado.httputil.HTTPHeaders` object
|
||||
|
||||
* ``effective_url``: final location of the resource after following any
|
||||
redirects
|
||||
|
||||
* ``buffer``: ``cStringIO`` object for response body
|
||||
|
||||
* ``body``: response body as bytes (created on demand from ``self.buffer``)
|
||||
|
||||
* ``error``: Exception object, if any
|
||||
|
||||
* ``request_time``: seconds from request start to finish. Includes all
|
||||
network operations from DNS resolution to receiving the last byte of
|
||||
data. Does not include time spent in the queue (due to the
|
||||
``max_clients`` option). If redirects were followed, only includes
|
||||
the final request.
|
||||
|
||||
* ``start_time``: Time at which the HTTP operation started, based on
|
||||
`time.time` (not the monotonic clock used by `.IOLoop.time`). May
|
||||
be ``None`` if the request timed out while in the queue.
|
||||
|
||||
* ``time_info``: dictionary of diagnostic timing information from the
|
||||
request. Available data are subject to change, but currently uses timings
|
||||
available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
|
||||
plus ``queue``, which is the delay (if any) introduced by waiting for
|
||||
a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
|
||||
|
||||
.. versionadded:: 5.1
|
||||
|
||||
Added the ``start_time`` attribute.
|
||||
|
||||
.. versionchanged:: 5.1
|
||||
|
||||
The ``request_time`` attribute previously included time spent in the queue
|
||||
for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
|
||||
is excluded in both implementations. ``request_time`` is now more accurate for
|
||||
``curl_httpclient`` because it uses a monotonic clock when available.
|
||||
"""
|
||||
|
||||
# I'm not sure why these don't get type-inferred from the references in __init__.
|
||||
error = None # type: Optional[BaseException]
|
||||
_error_is_response_code = False
|
||||
request = None # type: HTTPRequest
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
request: HTTPRequest,
|
||||
code: int,
|
||||
headers: Optional[httputil.HTTPHeaders] = None,
|
||||
buffer: Optional[BytesIO] = None,
|
||||
effective_url: Optional[str] = None,
|
||||
error: Optional[BaseException] = None,
|
||||
request_time: Optional[float] = None,
|
||||
time_info: Optional[Dict[str, float]] = None,
|
||||
reason: Optional[str] = None,
|
||||
start_time: Optional[float] = None,
|
||||
) -> None:
|
||||
if isinstance(request, _RequestProxy):
|
||||
self.request = request.request
|
||||
else:
|
||||
self.request = request
|
||||
self.code = code
|
||||
self.reason = reason or httputil.responses.get(code, "Unknown")
|
||||
if headers is not None:
|
||||
self.headers = headers
|
||||
else:
|
||||
self.headers = httputil.HTTPHeaders()
|
||||
self.buffer = buffer
|
||||
self._body = None # type: Optional[bytes]
|
||||
if effective_url is None:
|
||||
self.effective_url = request.url
|
||||
else:
|
||||
self.effective_url = effective_url
|
||||
self._error_is_response_code = False
|
||||
if error is None:
|
||||
if self.code < 200 or self.code >= 300:
|
||||
self._error_is_response_code = True
|
||||
self.error = HTTPError(self.code, message=self.reason, response=self)
|
||||
else:
|
||||
self.error = None
|
||||
else:
|
||||
self.error = error
|
||||
self.start_time = start_time
|
||||
self.request_time = request_time
|
||||
self.time_info = time_info or {}
|
||||
|
||||
@property
|
||||
def body(self) -> bytes:
|
||||
if self.buffer is None:
|
||||
return b""
|
||||
elif self._body is None:
|
||||
self._body = self.buffer.getvalue()
|
||||
|
||||
return self._body
|
||||
|
||||
def rethrow(self) -> None:
|
||||
"""If there was an error on the request, raise an `HTTPError`."""
|
||||
if self.error:
|
||||
raise self.error
|
||||
|
||||
def __repr__(self) -> str:
|
||||
args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
|
||||
return "%s(%s)" % (self.__class__.__name__, args)
|
||||
|
||||
|
||||
class HTTPClientError(Exception):
|
||||
"""Exception thrown for an unsuccessful HTTP request.
|
||||
|
||||
Attributes:
|
||||
|
||||
* ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is
|
||||
used when no HTTP response was received, e.g. for a timeout.
|
||||
|
||||
* ``response`` - `HTTPResponse` object, if any.
|
||||
|
||||
Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
|
||||
and you can look at ``error.response.headers['Location']`` to see the
|
||||
destination of the redirect.
|
||||
|
||||
.. versionchanged:: 5.1
|
||||
|
||||
Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with
|
||||
`tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains
|
||||
as an alias.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
code: int,
|
||||
message: Optional[str] = None,
|
||||
response: Optional[HTTPResponse] = None,
|
||||
) -> None:
|
||||
self.code = code
|
||||
self.message = message or httputil.responses.get(code, "Unknown")
|
||||
self.response = response
|
||||
super().__init__(code, message, response)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return "HTTP %d: %s" % (self.code, self.message)
|
||||
|
||||
# There is a cyclic reference between self and self.response,
|
||||
# which breaks the default __repr__ implementation.
|
||||
# (especially on pypy, which doesn't have the same recursion
|
||||
# detection as cpython).
|
||||
__repr__ = __str__
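# Example: str(HTTPClientError(599)) == "HTTP 599: Unknown"; code 599 has
# no standard reason phrase, so the message falls back to "Unknown".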
|
||||
|
||||
|
||||
HTTPError = HTTPClientError
|
||||
|
||||
|
||||
class _RequestProxy(object):
|
||||
"""Combines an object with a dictionary of defaults.
|
||||
|
||||
Used internally by AsyncHTTPClient implementations.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]
|
||||
) -> None:
|
||||
self.request = request
|
||||
self.defaults = defaults
|
||||
|
||||
def __getattr__(self, name: str) -> Any:
|
||||
request_attr = getattr(self.request, name)
|
||||
if request_attr is not None:
|
||||
return request_attr
|
||||
elif self.defaults is not None:
|
||||
return self.defaults.get(name, None)
|
||||
else:
|
||||
return None
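# Example: with defaults {"connect_timeout": 20.0} and a request whose
# connect_timeout was left as None, proxy.connect_timeout resolves to
# 20.0; attributes set explicitly on the request take precedence.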
|
||||
|
||||
|
||||
def main() -> None:
|
||||
from tornado.options import define, options, parse_command_line
|
||||
|
||||
define("print_headers", type=bool, default=False)
|
||||
define("print_body", type=bool, default=True)
|
||||
define("follow_redirects", type=bool, default=True)
|
||||
define("validate_cert", type=bool, default=True)
|
||||
define("proxy_host", type=str)
|
||||
define("proxy_port", type=int)
|
||||
args = parse_command_line()
|
||||
client = HTTPClient()
|
||||
for arg in args:
|
||||
try:
|
||||
response = client.fetch(
|
||||
arg,
|
||||
follow_redirects=options.follow_redirects,
|
||||
validate_cert=options.validate_cert,
|
||||
proxy_host=options.proxy_host,
|
||||
proxy_port=options.proxy_port,
|
||||
)
|
||||
except HTTPError as e:
|
||||
if e.response is not None:
|
||||
response = e.response
|
||||
else:
|
||||
raise
|
||||
if options.print_headers:
|
||||
print(response.headers)
|
||||
if options.print_body:
|
||||
print(native_str(response.body))
|
||||
client.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
398
venv/Lib/site-packages/tornado/httpserver.py
Normal file
@@ -0,0 +1,398 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A non-blocking, single-threaded HTTP server.

Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).

.. versionchanged:: 4.0

   The ``HTTPRequest`` class that used to live in this module has been moved
   to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
"""

import socket
import ssl

from tornado.escape import native_str
from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from tornado import httputil
from tornado import iostream
from tornado import netutil
from tornado.tcpserver import TCPServer
from tornado.util import Configurable

import typing
from typing import Union, Any, Dict, Callable, List, Type, Tuple, Optional, Awaitable

if typing.TYPE_CHECKING:
    from typing import Set  # noqa: F401


class HTTPServer(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate):
    r"""A non-blocking, single-threaded HTTP server.

    A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
    or, for backwards compatibility, a callback that takes an
    `.HTTPServerRequest` as an argument. The delegate is usually a
    `tornado.web.Application`.

    `HTTPServer` supports keep-alive connections by default
    (automatically for HTTP/1.1, or for HTTP/1.0 when the client
    requests ``Connection: keep-alive``).

    If ``xheaders`` is ``True``, we support the
    ``X-Real-Ip``/``X-Forwarded-For`` and
    ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
    remote IP and URI scheme/protocol for all requests. These headers
    are useful when running Tornado behind a reverse proxy or load
    balancer. The ``protocol`` argument can also be set to ``https``
    if Tornado is run behind an SSL-decoding proxy that does not set one of
    the supported ``xheaders``.

    By default, when parsing the ``X-Forwarded-For`` header, Tornado will
    select the last (i.e., the closest) address on the list of hosts as the
    remote host IP address. To select the next server in the chain, a list of
    trusted downstream hosts may be passed as the ``trusted_downstream``
    argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
    header.

    To make this server serve SSL traffic, send the ``ssl_options`` keyword
    argument with an `ssl.SSLContext` object. For compatibility with older
    versions of Python ``ssl_options`` may also be a dictionary of keyword
    arguments for the `ssl.wrap_socket` method.::

       ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
       ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
                               os.path.join(data_dir, "mydomain.key"))
       HTTPServer(application, ssl_options=ssl_ctx)

    `HTTPServer` initialization follows one of three patterns (the
    initialization methods are defined on `tornado.tcpserver.TCPServer`):

    1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::

            server = HTTPServer(app)
            server.listen(8888)
            IOLoop.current().start()

       In many cases, `tornado.web.Application.listen` can be used to avoid
       the need to explicitly create the `HTTPServer`.

    2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
       simple multi-process::

            server = HTTPServer(app)
            server.bind(8888)
            server.start(0)  # Forks multiple sub-processes
            IOLoop.current().start()

       When using this interface, an `.IOLoop` must *not* be passed
       to the `HTTPServer` constructor. `~.TCPServer.start` will always start
       the server on the default singleton `.IOLoop`.

    3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::

            sockets = tornado.netutil.bind_sockets(8888)
            tornado.process.fork_processes(0)
            server = HTTPServer(app)
            server.add_sockets(sockets)
            IOLoop.current().start()

       The `~.TCPServer.add_sockets` interface is more complicated,
       but it can be used with `tornado.process.fork_processes` to
       give you more flexibility in when the fork happens.
       `~.TCPServer.add_sockets` can also be used in single-process
       servers if you want to create your listening sockets in some
       way other than `tornado.netutil.bind_sockets`.

    .. versionchanged:: 4.0
       Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
       ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
       arguments. Added support for `.HTTPServerConnectionDelegate`
       instances as ``request_callback``.

    .. versionchanged:: 4.1
       `.HTTPServerConnectionDelegate.start_request` is now called with
       two arguments ``(server_conn, request_conn)`` (in accordance with the
       documentation) instead of one ``(request_conn)``.

    .. versionchanged:: 4.2
       `HTTPServer` is now a subclass of `tornado.util.Configurable`.

    .. versionchanged:: 4.5
       Added the ``trusted_downstream`` argument.

    .. versionchanged:: 5.0
       The ``io_loop`` argument has been removed.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Ignore args to __init__; real initialization belongs in
        # initialize since we're Configurable. (there's something
        # weird in initialization order between this class,
        # Configurable, and TCPServer so we can't leave __init__ out
        # completely)
        pass

    def initialize(
        self,
        request_callback: Union[
            httputil.HTTPServerConnectionDelegate,
            Callable[[httputil.HTTPServerRequest], None],
        ],
        no_keep_alive: bool = False,
        xheaders: bool = False,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
        protocol: Optional[str] = None,
        decompress_request: bool = False,
        chunk_size: Optional[int] = None,
        max_header_size: Optional[int] = None,
        idle_connection_timeout: Optional[float] = None,
        body_timeout: Optional[float] = None,
        max_body_size: Optional[int] = None,
        max_buffer_size: Optional[int] = None,
        trusted_downstream: Optional[List[str]] = None,
    ) -> None:
        # This method's signature is not extracted with autodoc
        # because we want its arguments to appear on the class
        # constructor. When changing this signature, also update the
        # copy in httpserver.rst.
        self.request_callback = request_callback
        self.xheaders = xheaders
        self.protocol = protocol
        self.conn_params = HTTP1ConnectionParameters(
            decompress=decompress_request,
            chunk_size=chunk_size,
            max_header_size=max_header_size,
            header_timeout=idle_connection_timeout or 3600,
            max_body_size=max_body_size,
            body_timeout=body_timeout,
            no_keep_alive=no_keep_alive,
        )
        TCPServer.__init__(
            self,
            ssl_options=ssl_options,
            max_buffer_size=max_buffer_size,
            read_chunk_size=chunk_size,
        )
        self._connections = set()  # type: Set[HTTP1ServerConnection]
        self.trusted_downstream = trusted_downstream

    @classmethod
    def configurable_base(cls) -> Type[Configurable]:
        return HTTPServer

    @classmethod
    def configurable_default(cls) -> Type[Configurable]:
        return HTTPServer

    async def close_all_connections(self) -> None:
        """Close all open connections and asynchronously wait for them to finish.

        This method is used in combination with `~.TCPServer.stop` to
        support clean shutdowns (especially for unittests). Typical
        usage would call ``stop()`` first to stop accepting new
        connections, then ``await close_all_connections()`` to wait for
        existing connections to finish.

        This method does not currently close open websocket connections.

        Note that this method is a coroutine and must be called with ``await``.

        """
        while self._connections:
            # Peek at an arbitrary element of the set
            conn = next(iter(self._connections))
            await conn.close()

    def handle_stream(self, stream: iostream.IOStream, address: Tuple) -> None:
        context = _HTTPRequestContext(
            stream, address, self.protocol, self.trusted_downstream
        )
        conn = HTTP1ServerConnection(stream, self.conn_params, context)
        self._connections.add(conn)
        conn.start_serving(self)

    def start_request(
        self, server_conn: object, request_conn: httputil.HTTPConnection
    ) -> httputil.HTTPMessageDelegate:
        if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
            delegate = self.request_callback.start_request(server_conn, request_conn)
        else:
            delegate = _CallableAdapter(self.request_callback, request_conn)

        if self.xheaders:
            delegate = _ProxyAdapter(delegate, request_conn)

        return delegate

    def on_close(self, server_conn: object) -> None:
        self._connections.remove(typing.cast(HTTP1ServerConnection, server_conn))


class _CallableAdapter(httputil.HTTPMessageDelegate):
    def __init__(
        self,
        request_callback: Callable[[httputil.HTTPServerRequest], None],
        request_conn: httputil.HTTPConnection,
    ) -> None:
        self.connection = request_conn
        self.request_callback = request_callback
        self.request = None  # type: Optional[httputil.HTTPServerRequest]
        self.delegate = None
        self._chunks = []  # type: List[bytes]

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        self.request = httputil.HTTPServerRequest(
            connection=self.connection,
            start_line=typing.cast(httputil.RequestStartLine, start_line),
            headers=headers,
        )
        return None

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        self._chunks.append(chunk)
        return None

    def finish(self) -> None:
        assert self.request is not None
        self.request.body = b"".join(self._chunks)
        self.request._parse_body()
        self.request_callback(self.request)

    def on_connection_close(self) -> None:
        del self._chunks


class _HTTPRequestContext(object):
    def __init__(
        self,
        stream: iostream.IOStream,
        address: Tuple,
        protocol: Optional[str],
        trusted_downstream: Optional[List[str]] = None,
    ) -> None:
        self.address = address
        # Save the socket's address family now so we know how to
        # interpret self.address even after the stream is closed
        # and its socket attribute replaced with None.
        if stream.socket is not None:
            self.address_family = stream.socket.family
        else:
            self.address_family = None
        # In HTTPServerRequest we want an IP, not a full socket address.
        if (
            self.address_family in (socket.AF_INET, socket.AF_INET6)
            and address is not None
        ):
            self.remote_ip = address[0]
        else:
            # Unix (or other) socket; fake the remote address.
            self.remote_ip = "0.0.0.0"
        if protocol:
            self.protocol = protocol
        elif isinstance(stream, iostream.SSLIOStream):
            self.protocol = "https"
        else:
            self.protocol = "http"
        self._orig_remote_ip = self.remote_ip
        self._orig_protocol = self.protocol
        self.trusted_downstream = set(trusted_downstream or [])

    def __str__(self) -> str:
        if self.address_family in (socket.AF_INET, socket.AF_INET6):
            return self.remote_ip
        elif isinstance(self.address, bytes):
            # Python 3 with the -bb option warns about str(bytes),
            # so convert it explicitly.
            # Unix socket addresses are str on mac but bytes on linux.
            return native_str(self.address)
        else:
            return str(self.address)

    def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None:
        """Rewrite the ``remote_ip`` and ``protocol`` fields."""
        # Squid uses X-Forwarded-For, others use X-Real-Ip
        ip = headers.get("X-Forwarded-For", self.remote_ip)
        # Skip trusted downstream hosts in X-Forwarded-For list
        for ip in (cand.strip() for cand in reversed(ip.split(","))):
            if ip not in self.trusted_downstream:
                break
        ip = headers.get("X-Real-Ip", ip)
        if netutil.is_valid_ip(ip):
            self.remote_ip = ip
        # AWS uses X-Forwarded-Proto
        proto_header = headers.get(
            "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)
        )
        if proto_header:
            # use only the last proto entry if there is more than one
            # TODO: support trusting multiple layers of proxied protocol
            proto_header = proto_header.split(",")[-1].strip()
        if proto_header in ("http", "https"):
            self.protocol = proto_header

    def _unapply_xheaders(self) -> None:
        """Undo changes from `_apply_xheaders`.

        Xheaders are per-request so they should not leak to the next
        request on the same connection.
        """
        self.remote_ip = self._orig_remote_ip
        self.protocol = self._orig_protocol


class _ProxyAdapter(httputil.HTTPMessageDelegate):
    def __init__(
        self,
        delegate: httputil.HTTPMessageDelegate,
        request_conn: httputil.HTTPConnection,
    ) -> None:
        self.connection = request_conn
        self.delegate = delegate

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        # TODO: either make context an official part of the
        # HTTPConnection interface or figure out some other way to do this.
        self.connection.context._apply_xheaders(headers)  # type: ignore
        return self.delegate.headers_received(start_line, headers)

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        return self.delegate.data_received(chunk)

    def finish(self) -> None:
        self.delegate.finish()
        self._cleanup()

    def on_connection_close(self) -> None:
        self.delegate.on_connection_close()
        self._cleanup()

    def _cleanup(self) -> None:
        self.connection.context._unapply_xheaders()  # type: ignore


HTTPRequest = httputil.HTTPServerRequest
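
A minimal sketch of the single-process ``listen`` pattern described in the HTTPServer docstring above (the handler class and port are illustrative assumptions, not part of this file):

    import tornado.ioloop
    import tornado.web
    from tornado.httpserver import HTTPServer

    class HelloHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello, world")

    if __name__ == "__main__":
        app = tornado.web.Application([(r"/", HelloHandler)])
        server = HTTPServer(app)
        server.listen(8888)
        tornado.ioloop.IOLoop.current().start()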
1133
venv/Lib/site-packages/tornado/httputil.py
Normal file
File diff suppressed because it is too large
944
venv/Lib/site-packages/tornado/ioloop.py
Normal file
@@ -0,0 +1,944 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""An I/O event loop for non-blocking sockets.

In Tornado 6.0, `.IOLoop` is a wrapper around the `asyncio` event
loop, with a slightly different interface for historical reasons.
Applications can use either the `.IOLoop` interface or the underlying
`asyncio` event loop directly (unless compatibility with older
versions of Tornado is desired, in which case `.IOLoop` must be used).

Typical applications will use a single `IOLoop` object, accessed via
`IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.

"""

import asyncio
import concurrent.futures
import datetime
import functools
import logging
import numbers
import os
import sys
import time
import math
import random

from tornado.concurrent import (
    Future,
    is_future,
    chain_future,
    future_set_exc_info,
    future_add_done_callback,
)
from tornado.log import app_log
from tornado.util import Configurable, TimeoutError, import_object

import typing
from typing import Union, Any, Type, Optional, Callable, TypeVar, Tuple, Awaitable

if typing.TYPE_CHECKING:
    from typing import Dict, List  # noqa: F401

    from typing_extensions import Protocol
else:
    Protocol = object


class _Selectable(Protocol):
    def fileno(self) -> int:
        pass

    def close(self) -> None:
        pass


_T = TypeVar("_T")
_S = TypeVar("_S", bound=_Selectable)


class IOLoop(Configurable):
    """An I/O event loop.

    As of Tornado 6.0, `IOLoop` is a wrapper around the `asyncio` event
    loop.

    Example usage for a simple TCP server:

    .. testcode::

        import errno
        import functools
        import socket

        import tornado.ioloop
        from tornado.iostream import IOStream

        async def handle_connection(connection, address):
            stream = IOStream(connection)
            message = await stream.read_until_close()
            print("message from client:", message.decode().strip())

        def connection_ready(sock, fd, events):
            while True:
                try:
                    connection, address = sock.accept()
                except BlockingIOError:
                    return
                connection.setblocking(0)
                io_loop = tornado.ioloop.IOLoop.current()
                io_loop.spawn_callback(handle_connection, connection, address)

        if __name__ == '__main__':
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setblocking(0)
            sock.bind(("", 8888))
            sock.listen(128)

            io_loop = tornado.ioloop.IOLoop.current()
            callback = functools.partial(connection_ready, sock)
            io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
            io_loop.start()

    .. testoutput::
       :hide:

    By default, a newly-constructed `IOLoop` becomes the thread's current
    `IOLoop`, unless there already is a current `IOLoop`. This behavior
    can be controlled with the ``make_current`` argument to the `IOLoop`
    constructor: if ``make_current=True``, the new `IOLoop` will always
    try to become current and it raises an error if there is already a
    current instance. If ``make_current=False``, the new `IOLoop` will
    not try to become current.

    In general, an `IOLoop` cannot survive a fork or be shared across
    processes in any way. When multiple processes are being used, each
    process should create its own `IOLoop`, which also implies that
    any objects which depend on the `IOLoop` (such as
    `.AsyncHTTPClient`) must also be created in the child processes.
    As a guideline, anything that starts processes (including the
    `tornado.process` and `multiprocessing` modules) should do so as
    early as possible, ideally the first thing the application does
    after loading its configuration in ``main()``.

    .. versionchanged:: 4.2
       Added the ``make_current`` keyword argument to the `IOLoop`
       constructor.

    .. versionchanged:: 5.0

       Uses the `asyncio` event loop by default. The
       ``IOLoop.configure`` method cannot be used on Python 3 except
       to redundantly specify the `asyncio` event loop.

    """

    # These constants were originally based on constants from the epoll module.
    NONE = 0
    READ = 0x001
    WRITE = 0x004
    ERROR = 0x018

    # In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
    _ioloop_for_asyncio = dict()  # type: Dict[asyncio.AbstractEventLoop, IOLoop]

    @classmethod
    def configure(
        cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
    ) -> None:
        if asyncio is not None:
            from tornado.platform.asyncio import BaseAsyncIOLoop

            if isinstance(impl, str):
                impl = import_object(impl)
            if isinstance(impl, type) and not issubclass(impl, BaseAsyncIOLoop):
                raise RuntimeError(
                    "only AsyncIOLoop is allowed when asyncio is available"
                )
        super(IOLoop, cls).configure(impl, **kwargs)

    @staticmethod
    def instance() -> "IOLoop":
        """Deprecated alias for `IOLoop.current()`.

        .. versionchanged:: 5.0

           Previously, this method returned a global singleton
           `IOLoop`, in contrast with the per-thread `IOLoop` returned
           by `current()`. In nearly all cases the two were the same
           (when they differed, it was generally used from non-Tornado
           threads to communicate back to the main thread's `IOLoop`).
           This distinction is not present in `asyncio`, so in order
           to facilitate integration with that package `instance()`
           was changed to be an alias to `current()`. Applications
           using the cross-thread communications aspect of
           `instance()` should instead set their own global variable
           to point to the `IOLoop` they want to use.

        .. deprecated:: 5.0
        """
        return IOLoop.current()

    def install(self) -> None:
        """Deprecated alias for `make_current()`.

        .. versionchanged:: 5.0

           Previously, this method would set this `IOLoop` as the
           global singleton used by `IOLoop.instance()`. Now that
           `instance()` is an alias for `current()`, `install()`
           is an alias for `make_current()`.

        .. deprecated:: 5.0
        """
        self.make_current()

    @staticmethod
    def clear_instance() -> None:
        """Deprecated alias for `clear_current()`.

        .. versionchanged:: 5.0

           Previously, this method would clear the `IOLoop` used as
           the global singleton by `IOLoop.instance()`. Now that
           `instance()` is an alias for `current()`,
           `clear_instance()` is an alias for `clear_current()`.

        .. deprecated:: 5.0

        """
        IOLoop.clear_current()

    @typing.overload
    @staticmethod
    def current() -> "IOLoop":
        pass

    @typing.overload
    @staticmethod
    def current(instance: bool = True) -> Optional["IOLoop"]:  # noqa: F811
        pass

    @staticmethod
    def current(instance: bool = True) -> Optional["IOLoop"]:  # noqa: F811
        """Returns the current thread's `IOLoop`.

        If an `IOLoop` is currently running or has been marked as
        current by `make_current`, returns that instance. If there is
        no current `IOLoop` and ``instance`` is true, creates one.

        .. versionchanged:: 4.1
           Added ``instance`` argument to control the fallback to
           `IOLoop.instance()`.
        .. versionchanged:: 5.0
           On Python 3, control of the current `IOLoop` is delegated
           to `asyncio`, with this and other methods as pass-through accessors.
           The ``instance`` argument now controls whether an `IOLoop`
           is created automatically when there is none, instead of
           whether we fall back to `IOLoop.instance()` (which is now
           an alias for this method). ``instance=False`` is deprecated,
           since even if we do not create an `IOLoop`, this method
           may initialize the asyncio loop.
        """
        try:
            loop = asyncio.get_event_loop()
        except (RuntimeError, AssertionError):
            if not instance:
                return None
            raise
        try:
            return IOLoop._ioloop_for_asyncio[loop]
        except KeyError:
            if instance:
                from tornado.platform.asyncio import AsyncIOMainLoop

                current = AsyncIOMainLoop(make_current=True)  # type: Optional[IOLoop]
            else:
                current = None
        return current

    def make_current(self) -> None:
        """Makes this the `IOLoop` for the current thread.

        An `IOLoop` automatically becomes current for its thread
        when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
        so that code run at startup time can find the right
        instance.

        .. versionchanged:: 4.1
           An `IOLoop` created while there is no current `IOLoop`
           will automatically become current.

        .. versionchanged:: 5.0
           This method also sets the current `asyncio` event loop.
        """
        # The asyncio event loops override this method.
        raise NotImplementedError()

    @staticmethod
    def clear_current() -> None:
        """Clears the `IOLoop` for the current thread.

        Intended primarily for use by test frameworks in between tests.

        .. versionchanged:: 5.0
           This method also clears the current `asyncio` event loop.
        """
        old = IOLoop.current(instance=False)
        if old is not None:
            old._clear_current_hook()
        if asyncio is None:
            IOLoop._current.instance = None

    def _clear_current_hook(self) -> None:
        """Instance method called when an IOLoop ceases to be current.

        May be overridden by subclasses as a counterpart to make_current.
        """
        pass

    @classmethod
    def configurable_base(cls) -> Type[Configurable]:
        return IOLoop

    @classmethod
    def configurable_default(cls) -> Type[Configurable]:
        from tornado.platform.asyncio import AsyncIOLoop

        return AsyncIOLoop

    def initialize(self, make_current: Optional[bool] = None) -> None:
        if make_current is None:
            if IOLoop.current(instance=False) is None:
                self.make_current()
        elif make_current:
            current = IOLoop.current(instance=False)
            # AsyncIO loops can already be current by this point.
            if current is not None and current is not self:
                raise RuntimeError("current IOLoop already exists")
            self.make_current()

    def close(self, all_fds: bool = False) -> None:
        """Closes the `IOLoop`, freeing any resources used.

        If ``all_fds`` is true, all file descriptors registered on the
        IOLoop will be closed (not just the ones created by the
        `IOLoop` itself).

        Many applications will only use a single `IOLoop` that runs for the
        entire lifetime of the process. In that case closing the `IOLoop`
        is not necessary since everything will be cleaned up when the
        process exits. `IOLoop.close` is provided mainly for scenarios
        such as unit tests, which create and destroy a large number of
        ``IOLoops``.

        An `IOLoop` must be completely stopped before it can be closed. This
        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
        be allowed to return before attempting to call `IOLoop.close()`.
        Therefore the call to `close` will usually appear just after
        the call to `start` rather than near the call to `stop`.

        .. versionchanged:: 3.1
           If the `IOLoop` implementation supports non-integer objects
           for "file descriptors", those objects will have their
           ``close`` method when ``all_fds`` is true.
        """
        raise NotImplementedError()

    @typing.overload
    def add_handler(
        self, fd: int, handler: Callable[[int, int], None], events: int
    ) -> None:
        pass

    @typing.overload  # noqa: F811
    def add_handler(
        self, fd: _S, handler: Callable[[_S, int], None], events: int
    ) -> None:
        pass

    def add_handler(  # noqa: F811
        self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
    ) -> None:
        """Registers the given handler to receive the given events for ``fd``.

        The ``fd`` argument may either be an integer file descriptor or
        a file-like object with a ``fileno()`` and ``close()`` method.

        The ``events`` argument is a bitwise or of the constants
        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.

        When an event occurs, ``handler(fd, events)`` will be run.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def update_handler(self, fd: Union[int, _Selectable], events: int) -> None:
        """Changes the events we listen for ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def remove_handler(self, fd: Union[int, _Selectable]) -> None:
        """Stop listening for events on ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def start(self) -> None:
        """Starts the I/O loop.

        The loop will run until one of the callbacks calls `stop()`, which
        will make the loop stop after the current event iteration completes.
        """
        raise NotImplementedError()

    def _setup_logging(self) -> None:
        """The IOLoop catches and logs exceptions, so it's
        important that log output be visible. However, python's
        default behavior for non-root loggers (prior to python
        3.2) is to print an unhelpful "no handlers could be
        found" message rather than the actual log entry, so we
        must explicitly configure logging if we've made it this
        far without anything.

        This method should be called from start() in subclasses.
        """
        if not any(
            [
                logging.getLogger().handlers,
                logging.getLogger("tornado").handlers,
                logging.getLogger("tornado.application").handlers,
            ]
        ):
            logging.basicConfig()

    def stop(self) -> None:
        """Stop the I/O loop.

        If the event loop is not currently running, the next call to `start()`
        will return immediately.

        Note that even after `stop` has been called, the `IOLoop` is not
        completely stopped until `IOLoop.start` has also returned.
        Some work that was scheduled before the call to `stop` may still
        be run before the `IOLoop` shuts down.
        """
        raise NotImplementedError()

    def run_sync(self, func: Callable, timeout: Optional[float] = None) -> Any:
        """Starts the `IOLoop`, runs the given function, and stops the loop.

        The function must return either an awaitable object or
        ``None``. If the function returns an awaitable object, the
        `IOLoop` will run until the awaitable is resolved (and
        `run_sync()` will return the awaitable's result). If it raises
        an exception, the `IOLoop` will stop and the exception will be
        re-raised to the caller.

        The keyword-only argument ``timeout`` may be used to set
        a maximum duration for the function. If the timeout expires,
        a `tornado.util.TimeoutError` is raised.

        This method is useful to allow asynchronous calls in a
        ``main()`` function::

            async def main():
                # do stuff...

            if __name__ == '__main__':
                IOLoop.current().run_sync(main)

        .. versionchanged:: 4.3
           Returning a non-``None``, non-awaitable value is now an error.

        .. versionchanged:: 5.0
           If a timeout occurs, the ``func`` coroutine will be cancelled.

        """
        future_cell = [None]  # type: List[Optional[Future]]

        def run() -> None:
            try:
                result = func()
                if result is not None:
                    from tornado.gen import convert_yielded

                    result = convert_yielded(result)
            except Exception:
                fut = Future()  # type: Future[Any]
                future_cell[0] = fut
                future_set_exc_info(fut, sys.exc_info())
            else:
                if is_future(result):
                    future_cell[0] = result
                else:
                    fut = Future()
                    future_cell[0] = fut
                    fut.set_result(result)
            assert future_cell[0] is not None
            self.add_future(future_cell[0], lambda future: self.stop())

        self.add_callback(run)
        if timeout is not None:

            def timeout_callback() -> None:
                # If we can cancel the future, do so and wait on it. If not,
                # Just stop the loop and return with the task still pending.
                # (If we neither cancel nor wait for the task, a warning
                # will be logged).
                assert future_cell[0] is not None
                if not future_cell[0].cancel():
                    self.stop()

            timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
        self.start()
        if timeout is not None:
            self.remove_timeout(timeout_handle)
        assert future_cell[0] is not None
        if future_cell[0].cancelled() or not future_cell[0].done():
            raise TimeoutError("Operation timed out after %s seconds" % timeout)
        return future_cell[0].result()

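    # A hypothetical sketch of the run_sync() usage documented above; the
    # coroutine body and the 5-second timeout are illustrative assumptions:
    #
    #     async def main():
    #         await some_async_work()
    #
    #     IOLoop.current().run_sync(main, timeout=5.0)
    #
    # If the coroutine is still pending when the timeout expires, run_sync
    # cancels it and raises tornado.util.TimeoutError.
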
    def time(self) -> float:
        """Returns the current time according to the `IOLoop`'s clock.

        The return value is a floating-point number relative to an
        unspecified time in the past.

        Historically, the IOLoop could be customized to use e.g.
        `time.monotonic` instead of `time.time`, but this is not
        currently supported and so this method is equivalent to
        `time.time`.

        """
        return time.time()

    def add_timeout(
        self,
        deadline: Union[float, datetime.timedelta],
        callback: Callable[..., None],
        *args: Any,
        **kwargs: Any
    ) -> object:
        """Runs the ``callback`` at the time ``deadline`` from the I/O loop.

        Returns an opaque handle that may be passed to
        `remove_timeout` to cancel.

        ``deadline`` may be a number denoting a time (on the same
        scale as `IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time. Since Tornado 4.0, `call_later` is a more
        convenient alternative for the relative case since it does not
        require a timedelta object.

        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        `IOLoop`'s thread, and then call `add_timeout` from there.

        Subclasses of IOLoop must implement either `add_timeout` or
        `call_at`; the default implementations of each will call
        the other. `call_at` is usually easier to implement, but
        subclasses that wish to maintain compatibility with Tornado
        versions prior to 4.0 must use `add_timeout` instead.

        .. versionchanged:: 4.0
           Now passes through ``*args`` and ``**kwargs`` to the callback.
        """
        if isinstance(deadline, numbers.Real):
            return self.call_at(deadline, callback, *args, **kwargs)
        elif isinstance(deadline, datetime.timedelta):
            return self.call_at(
                self.time() + deadline.total_seconds(), callback, *args, **kwargs
            )
        else:
            raise TypeError("Unsupported deadline %r" % deadline)

    def call_later(
        self, delay: float, callback: Callable[..., None], *args: Any, **kwargs: Any
    ) -> object:
        """Runs the ``callback`` after ``delay`` seconds have passed.

        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel. Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.

        See `add_timeout` for comments on thread-safety and subclassing.

        .. versionadded:: 4.0
        """
        return self.call_at(self.time() + delay, callback, *args, **kwargs)

    def call_at(
        self, when: float, callback: Callable[..., None], *args: Any, **kwargs: Any
    ) -> object:
        """Runs the ``callback`` at the absolute time designated by ``when``.

        ``when`` must be a number using the same reference point as
        `IOLoop.time`.

        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel. Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.

        See `add_timeout` for comments on thread-safety and subclassing.

        .. versionadded:: 4.0
        """
        return self.add_timeout(when, callback, *args, **kwargs)

    def remove_timeout(self, timeout: object) -> None:
        """Cancels a pending timeout.

        The argument is a handle as returned by `add_timeout`. It is
        safe to call `remove_timeout` even if the callback has already
        been run.
        """
        raise NotImplementedError()

    def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
        """Calls the given callback on the next I/O loop iteration.

        It is safe to call this method from any thread at any time,
        except from a signal handler. Note that this is the **only**
        method in `IOLoop` that makes this thread-safety guarantee; all
        other interaction with the `IOLoop` must be done from that
        `IOLoop`'s thread. `add_callback()` may be used to transfer
        control from other threads to the `IOLoop`'s thread.

        To add a callback from a signal handler, see
        `add_callback_from_signal`.
        """
        raise NotImplementedError()

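    # A hypothetical sketch of the cross-thread hand-off described above;
    # compute() and handle_result() are illustrative assumptions:
    #
    #     loop = IOLoop.current()  # captured on the IOLoop's own thread
    #
    #     def worker():  # runs on a separate thread
    #         result = compute()
    #         # add_callback is the one thread-safe way back onto the loop:
    #         loop.add_callback(handle_result, result)
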
    def add_callback_from_signal(
        self, callback: Callable, *args: Any, **kwargs: Any
    ) -> None:
        """Calls the given callback on the next I/O loop iteration.

        Safe for use from a Python signal handler; should not be used
        otherwise.
        """
        raise NotImplementedError()

    def spawn_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
        """Calls the given callback on the next IOLoop iteration.

        As of Tornado 6.0, this method is equivalent to `add_callback`.

        .. versionadded:: 4.0
        """
        self.add_callback(callback, *args, **kwargs)

    def add_future(
        self,
        future: "Union[Future[_T], concurrent.futures.Future[_T]]",
        callback: Callable[["Future[_T]"], None],
    ) -> None:
        """Schedules a callback on the ``IOLoop`` when the given
        `.Future` is finished.

        The callback is invoked with one argument, the
        `.Future`.

        This method only accepts `.Future` objects and not other
        awaitables (unlike most of Tornado where the two are
        interchangeable).
        """
        if isinstance(future, Future):
            # Note that we specifically do not want the inline behavior of
            # tornado.concurrent.future_add_done_callback. We always want
            # this callback scheduled on the next IOLoop iteration (which
            # asyncio.Future always does).
            #
            # Wrap the callback in self._run_callback so we control
            # the error logging (i.e. it goes to tornado.log.app_log
            # instead of asyncio's log).
            future.add_done_callback(
                lambda f: self._run_callback(functools.partial(callback, future))
            )
        else:
            assert is_future(future)
            # For concurrent futures, we use self.add_callback, so
            # it's fine if future_add_done_callback inlines that call.
            future_add_done_callback(
                future, lambda f: self.add_callback(callback, future)
            )

    def run_in_executor(
        self,
        executor: Optional[concurrent.futures.Executor],
        func: Callable[..., _T],
        *args: Any
    ) -> Awaitable[_T]:
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if executor is None:
            if not hasattr(self, "_executor"):
                from tornado.process import cpu_count

                self._executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=(cpu_count() * 5)
                )  # type: concurrent.futures.Executor
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()  # type: Future[_T]
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future

    def set_default_executor(self, executor: concurrent.futures.Executor) -> None:
        """Sets the default executor to use with :meth:`run_in_executor`.

        .. versionadded:: 5.0
        """
        self._executor = executor

    def _run_callback(self, callback: Callable[[], Any]) -> None:
        """Runs a callback with error handling.

        .. versionchanged:: 6.0

           CancelledErrors are no longer logged.
        """
        try:
            ret = callback()
            if ret is not None:
                from tornado import gen

                # Functions that return Futures typically swallow all
                # exceptions and store them in the Future. If a Future
                # makes it out to the IOLoop, ensure its exception (if any)
                # gets logged too.
                try:
                    ret = gen.convert_yielded(ret)
                except gen.BadYieldError:
                    # It's not unusual for add_callback to be used with
                    # methods returning a non-None and non-yieldable
                    # result, which should just be ignored.
                    pass
                else:
                    self.add_future(ret, self._discard_future_result)
        except asyncio.CancelledError:
            pass
        except Exception:
            app_log.error("Exception in callback %r", callback, exc_info=True)

    def _discard_future_result(self, future: Future) -> None:
        """Avoid unhandled-exception warnings from spawned coroutines."""
        future.result()

    def split_fd(
        self, fd: Union[int, _Selectable]
    ) -> Tuple[int, Union[int, _Selectable]]:
        # """Returns an (fd, obj) pair from an ``fd`` parameter.

        # We accept both raw file descriptors and file-like objects as
        # input to `add_handler` and related methods. When a file-like
        # object is passed, we must retain the object itself so we can
        # close it correctly when the `IOLoop` shuts down, but the
        # poller interfaces favor file descriptors (they will accept
        # file-like objects and call ``fileno()`` for you, but they
        # always return the descriptor itself).

        # This method is provided for use by `IOLoop` subclasses and should
        # not generally be used by application code.

        # .. versionadded:: 4.0
        # """
        if isinstance(fd, int):
            return fd, fd
        return fd.fileno(), fd

    def close_fd(self, fd: Union[int, _Selectable]) -> None:
        # """Utility method to close an ``fd``.

        # If ``fd`` is a file-like object, we close it directly; otherwise
        # we use `os.close`.

        # This method is provided for use by `IOLoop` subclasses (in
        # implementations of ``IOLoop.close(all_fds=True)`` and should
        # not generally be used by application code.

        # .. versionadded:: 4.0
        # """
        try:
            if isinstance(fd, int):
                os.close(fd)
            else:
                fd.close()
        except OSError:
            pass


class _Timeout(object):
    """An IOLoop timeout, a UNIX timestamp and a callback"""

    # Reduce memory overhead when there are lots of pending callbacks
    __slots__ = ["deadline", "callback", "tdeadline"]

    def __init__(
        self, deadline: float, callback: Callable[[], None], io_loop: IOLoop
    ) -> None:
        if not isinstance(deadline, numbers.Real):
            raise TypeError("Unsupported deadline %r" % deadline)
        self.deadline = deadline
        self.callback = callback
        self.tdeadline = (
            deadline,
            next(io_loop._timeout_counter),
        )  # type: Tuple[float, int]

    # Comparison methods to sort by deadline, with object id as a tiebreaker
    # to guarantee a consistent ordering. The heapq module uses __le__
    # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
    # use __lt__).
    def __lt__(self, other: "_Timeout") -> bool:
        return self.tdeadline < other.tdeadline

    def __le__(self, other: "_Timeout") -> bool:
        return self.tdeadline <= other.tdeadline


class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every ``callback_time`` milliseconds.
    Note that the timeout is given in milliseconds, while most other
    time-related functions in Tornado use seconds.

    If ``jitter`` is specified, each callback time will be randomly selected
    within a window of ``jitter * callback_time`` milliseconds.
    Jitter can be used to reduce alignment of events with similar periods.
    A jitter of 0.1 means allowing a 10% variation in callback time.
    The window is centered on ``callback_time`` so the total number of calls
    within a given interval should not be significantly affected by adding
    jitter.

    If the callback runs for longer than ``callback_time`` milliseconds,
    subsequent invocations will be skipped to get back on schedule.

    `start` must be called after the `PeriodicCallback` is created.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

    .. versionchanged:: 5.1
       The ``jitter`` argument is added.
    """

    def __init__(
        self, callback: Callable[[], None], callback_time: float, jitter: float = 0
    ) -> None:
        self.callback = callback
        if callback_time <= 0:
            raise ValueError("Periodic callback must have a positive callback_time")
        self.callback_time = callback_time
        self.jitter = jitter
        self._running = False
        self._timeout = None  # type: object

    def start(self) -> None:
        """Starts the timer."""
        # Looking up the IOLoop here allows to first instantiate the
        # PeriodicCallback in another thread, then start it using
        # IOLoop.add_callback().
        self.io_loop = IOLoop.current()
        self._running = True
        self._next_timeout = self.io_loop.time()
        self._schedule_next()

    def stop(self) -> None:
        """Stops the timer."""
        self._running = False
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def is_running(self) -> bool:
        """Returns ``True`` if this `.PeriodicCallback` has been started.

        .. versionadded:: 4.1
        """
        return self._running

    def _run(self) -> None:
        if not self._running:
            return
        try:
            return self.callback()
        except Exception:
            app_log.error("Exception in callback %r", self.callback, exc_info=True)
        finally:
            self._schedule_next()

    def _schedule_next(self) -> None:
        if self._running:
            self._update_next(self.io_loop.time())
            self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)

    def _update_next(self, current_time: float) -> None:
        callback_time_sec = self.callback_time / 1000.0
        if self.jitter:
            # apply jitter fraction
            callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5))
        if self._next_timeout <= current_time:
            # The period should be measured from the start of one call
            # to the start of the next. If one call takes too long,
            # skip cycles to get back to a multiple of the original
            # schedule.
            self._next_timeout += (
                math.floor((current_time - self._next_timeout) / callback_time_sec) + 1
            ) * callback_time_sec
        else:
            # If the clock moved backwards, ensure we advance the next
            # timeout instead of recomputing the same value again.
            # This may result in long gaps between callbacks if the
            # clock jumps backwards by a lot, but the far more common
            # scenario is a small NTP adjustment that should just be
            # ignored.
            #
            # Note that on some systems if time.time() runs slower
            # than time.monotonic() (most common on windows), we
            # effectively experience a small backwards time jump on
            # every iteration because PeriodicCallback uses
            # time.time() while asyncio schedules callbacks using
            # time.monotonic().
            # https://github.com/tornadoweb/tornado/issues/2333
            self._next_timeout += callback_time_sec
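
A short sketch of the PeriodicCallback API defined above (the callback body and the interval are illustrative assumptions):

    from tornado.ioloop import IOLoop, PeriodicCallback

    def heartbeat():
        print("still alive")

    # callback_time is in milliseconds; jitter=0.1 spreads each call +/-5%.
    pc = PeriodicCallback(heartbeat, callback_time=1000, jitter=0.1)
    pc.start()
    IOLoop.current().start()  # heartbeat() fires roughly once per second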
1660
venv/Lib/site-packages/tornado/iostream.py
Normal file
File diff suppressed because it is too large
581
venv/Lib/site-packages/tornado/locale.py
Normal file
@@ -0,0 +1,581 @@
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Translation methods for generating localized strings.

To load a locale and generate a translated string::

    user_locale = tornado.locale.get("es_LA")
    print(user_locale.translate("Sign out"))

`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::

    people = [...]
    message = user_locale.translate(
        "%(list)s is online", "%(list)s are online", len(people))
    print(message % {"list": user_locale.list(people)})

The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.

Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""

import codecs
import csv
import datetime
import gettext
import os
import re

from tornado import escape
from tornado.log import gen_log

from tornado._locale_data import LOCALE_NAMES

from typing import Iterable, Any, Union, Dict, Optional

_default_locale = "en_US"
_translations = {}  # type: Dict[str, Any]
_supported_locales = frozenset([_default_locale])
_use_gettext = False
CONTEXT_SEPARATOR = "\x04"


def get(*locale_codes: str) -> "Locale":
    """Returns the closest match for the given locale codes.

    We iterate over all given locale codes in order. If we have a tight
    or a loose match for the code (e.g., "en" for "en_US"), we return
    the locale. Otherwise we move to the next code in the list.

    By default we return ``en_US`` if no translations are found for any of
    the specified locales. You can change the default locale with
    `set_default_locale()`.
    """
    return Locale.get_closest(*locale_codes)


def set_default_locale(code: str) -> None:
    """Sets the default locale.

    The default locale is assumed to be the language used for all strings
    in the system. The translations loaded from disk are mappings from
    the default locale to the destination locale. Consequently, you don't
    need to create a translation file for the default locale.
    """
    global _default_locale
    global _supported_locales
    _default_locale = code
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])


def load_translations(directory: str, encoding: Optional[str] = None) -> None:
    """Loads translations from CSV files in a directory.

    Translations are strings with optional Python-style named placeholders
    (e.g., ``My name is %(name)s``) and their associated translations.

    The directory should have translation files of the form ``LOCALE.csv``,
    e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
    translation, and an optional plural indicator. Plural indicators should
    be one of "plural" or "singular". A given string can have both singular
    and plural forms. For example ``%(name)s liked this`` may have a
    different verb conjugation depending on whether %(name)s is one
    name or a list of names. There should be two rows in the CSV file for
    that string, one with plural indicator "singular", and one "plural".
    For strings with no verbs that would change on translation, simply
    use "unknown" or the empty string (or don't include the column at all).

    The file is read using the `csv` module in the default "excel" dialect.
    In this format there should not be spaces after the commas.

    If no ``encoding`` parameter is given, the encoding will be
    detected automatically (among UTF-8 and UTF-16) if the file
    contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
    is present.

    Example translation ``es_LA.csv``::

        "I love you","Te amo"
        "%(name)s liked this","A %(name)s les gustó esto","plural"
        "%(name)s liked this","A %(name)s le gustó esto","singular"

    .. versionchanged:: 4.3
       Added ``encoding`` parameter. Added support for BOM-based encoding
       detection, UTF-16, and UTF-8-with-BOM.
    """
    global _translations
    global _supported_locales
    _translations = {}
    for path in os.listdir(directory):
        if not path.endswith(".csv"):
            continue
        locale, extension = path.split(".")
        if not re.match("[a-z]+(_[A-Z]+)?$", locale):
            gen_log.error(
                "Unrecognized locale %r (path: %s)",
                locale,
                os.path.join(directory, path),
            )
            continue
        full_path = os.path.join(directory, path)
        if encoding is None:
            # Try to autodetect encoding based on the BOM.
            with open(full_path, "rb") as bf:
                data = bf.read(len(codecs.BOM_UTF16_LE))
            if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
                encoding = "utf-16"
            else:
                # utf-8-sig is "utf-8 with optional BOM". It's discouraged
                # in most cases but is common with CSV files because Excel
                # cannot read utf-8 files without a BOM.
                encoding = "utf-8-sig"
        # python 3: csv.reader requires a file open in text mode.
        # Specify an encoding to avoid dependence on $LANG environment variable.
        with open(full_path, encoding=encoding) as f:
            _translations[locale] = {}
            for i, row in enumerate(csv.reader(f)):
                if not row or len(row) < 2:
                    continue
                row = [escape.to_unicode(c).strip() for c in row]
                english, translation = row[:2]
                if len(row) > 2:
                    plural = row[2] or "unknown"
                else:
                    plural = "unknown"
                if plural not in ("plural", "singular", "unknown"):
                    gen_log.error(
                        "Unrecognized plural indicator %r in %s line %d",
                        plural,
                        path,
                        i + 1,
                    )
                    continue
                _translations[locale].setdefault(plural, {})[english] = translation
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))

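# A hypothetical sketch tying load_translations() to Locale.translate(); the
# directory name is an illustrative assumption, and the CSV contents follow
# the es_LA.csv example in the docstring above:
#
#     import tornado.locale
#
#     tornado.locale.load_translations("translations")  # reads LOCALE.csv files
#     user_locale = tornado.locale.get("es_LA")
#     print(user_locale.translate("I love you"))  # -> "Te amo"
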
def load_gettext_translations(directory: str, domain: str) -> None:
|
||||
"""Loads translations from `gettext`'s locale tree
|
||||
|
||||
Locale tree is similar to system's ``/usr/share/locale``, like::
|
||||
|
||||
{directory}/{lang}/LC_MESSAGES/{domain}.mo
|
||||
|
||||
Three steps are required to have your app translated:
|
||||
|
||||
1. Generate POT translation file::
|
||||
|
||||
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
|
||||
|
||||
2. Merge against existing POT file::
|
||||
|
||||
msgmerge old.po mydomain.po > new.po
|
||||
|
||||
3. Compile::
|
||||
|
||||
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
|
||||
"""
|
||||
global _translations
|
||||
global _supported_locales
|
||||
global _use_gettext
|
||||
_translations = {}
|
||||
for lang in os.listdir(directory):
|
||||
if lang.startswith("."):
|
||||
continue # skip .svn, etc
|
||||
if os.path.isfile(os.path.join(directory, lang)):
|
||||
continue
|
||||
try:
|
||||
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
|
||||
_translations[lang] = gettext.translation(
|
||||
domain, directory, languages=[lang]
|
||||
)
|
||||
except Exception as e:
|
||||
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
|
||||
continue
|
||||
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
|
||||
_use_gettext = True
|
||||
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
|
||||
|
||||
|
||||
def get_supported_locales() -> Iterable[str]:
|
||||
"""Returns a list of all the supported locale codes."""
|
||||
return _supported_locales
|
||||
|
||||
|
||||
class Locale(object):
|
||||
"""Object representing a locale.
|
||||
|
||||
After calling one of `load_translations` or `load_gettext_translations`,
|
||||
call `get` or `get_closest` to get a Locale object.
|
||||
"""
|
||||
|
||||
_cache = {} # type: Dict[str, Locale]
|
||||
|
||||
@classmethod
|
||||
def get_closest(cls, *locale_codes: str) -> "Locale":
|
||||
"""Returns the closest match for the given locale code."""
|
||||
for code in locale_codes:
|
||||
if not code:
|
||||
continue
|
||||
code = code.replace("-", "_")
|
||||
parts = code.split("_")
|
||||
if len(parts) > 2:
|
||||
continue
|
||||
elif len(parts) == 2:
|
||||
code = parts[0].lower() + "_" + parts[1].upper()
|
||||
if code in _supported_locales:
|
||||
return cls.get(code)
|
||||
if parts[0].lower() in _supported_locales:
|
||||
return cls.get(parts[0].lower())
|
||||
return cls.get(_default_locale)
|
||||
|
||||
@classmethod
|
||||
def get(cls, code: str) -> "Locale":
|
||||
"""Returns the Locale for the given locale code.
|
||||
|
||||
If it is not supported, we raise an exception.
|
||||
"""
|
||||
if code not in cls._cache:
|
||||
assert code in _supported_locales
|
||||
translations = _translations.get(code, None)
|
||||
if translations is None:
|
||||
locale = CSVLocale(code, {}) # type: Locale
|
||||
elif _use_gettext:
|
||||
locale = GettextLocale(code, translations)
|
||||
else:
|
||||
locale = CSVLocale(code, translations)
|
||||
cls._cache[code] = locale
|
||||
return cls._cache[code]
|
||||
|
||||
def __init__(self, code: str) -> None:
|
||||
self.code = code
|
||||
self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
|
||||
self.rtl = False
|
||||
for prefix in ["fa", "ar", "he"]:
|
||||
if self.code.startswith(prefix):
|
||||
self.rtl = True
|
||||
break
|
||||
|
||||
# Initialize strings for date formatting
|
||||
_ = self.translate
|
||||
self._months = [
|
||||
_("January"),
|
||||
_("February"),
|
||||
_("March"),
|
||||
_("April"),
|
||||
_("May"),
|
||||
_("June"),
|
||||
_("July"),
|
||||
_("August"),
|
||||
_("September"),
|
||||
_("October"),
|
||||
_("November"),
|
||||
_("December"),
|
||||
]
|
||||
self._weekdays = [
|
||||
_("Monday"),
|
||||
_("Tuesday"),
|
||||
_("Wednesday"),
|
||||
_("Thursday"),
|
||||
_("Friday"),
|
||||
_("Saturday"),
|
||||
_("Sunday"),
|
||||
]
|
||||
|
||||
def translate(
|
||||
self,
|
||||
message: str,
|
||||
plural_message: Optional[str] = None,
|
||||
count: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Returns the translation for the given message for this locale.
|
||||
|
||||
If ``plural_message`` is given, you must also provide
|
||||
``count``. We return ``plural_message`` when ``count != 1``,
|
||||
and we return the singular form for the given message when
|
||||
``count == 1``.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def pgettext(
|
||||
self,
|
||||
context: str,
|
||||
message: str,
|
||||
plural_message: Optional[str] = None,
|
||||
count: Optional[int] = None,
|
||||
) -> str:
|
||||
raise NotImplementedError()
|
||||
|
||||
def format_date(
|
||||
self,
|
||||
date: Union[int, float, datetime.datetime],
|
||||
gmt_offset: int = 0,
|
||||
relative: bool = True,
|
||||
shorter: bool = False,
|
||||
full_format: bool = False,
|
||||
) -> str:
|
||||
"""Formats the given date (which should be GMT).
|
||||
|
||||
By default, we return a relative time (e.g., "2 minutes ago"). You
|
||||
can return an absolute date string with ``relative=False``.
|
||||
|
||||
You can force a full format date ("July 10, 1980") with
|
||||
``full_format=True``.
|
||||
|
||||
This method is primarily intended for dates in the past.
|
||||
For dates in the future, we fall back to full format.
|
||||
"""
|
||||
if isinstance(date, (int, float)):
|
||||
date = datetime.datetime.utcfromtimestamp(date)
|
||||
now = datetime.datetime.utcnow()
|
||||
if date > now:
|
||||
if relative and (date - now).seconds < 60:
|
||||
# Due to clock skew, some things may be slightly
# in the future. Round timestamps in the immediate
# future down to now in relative mode.
|
||||
date = now
|
||||
else:
|
||||
# Otherwise, future dates always use the full format.
|
||||
full_format = True
|
||||
local_date = date - datetime.timedelta(minutes=gmt_offset)
|
||||
local_now = now - datetime.timedelta(minutes=gmt_offset)
|
||||
local_yesterday = local_now - datetime.timedelta(hours=24)
|
||||
difference = now - date
|
||||
seconds = difference.seconds
|
||||
days = difference.days
|
||||
|
||||
_ = self.translate
|
||||
format = None
|
||||
if not full_format:
|
||||
if relative and days == 0:
|
||||
if seconds < 50:
|
||||
return _("1 second ago", "%(seconds)d seconds ago", seconds) % {
|
||||
"seconds": seconds
|
||||
}
|
||||
|
||||
if seconds < 50 * 60:
|
||||
minutes = round(seconds / 60.0)
|
||||
return _("1 minute ago", "%(minutes)d minutes ago", minutes) % {
|
||||
"minutes": minutes
|
||||
}
|
||||
|
||||
hours = round(seconds / (60.0 * 60))
|
||||
return _("1 hour ago", "%(hours)d hours ago", hours) % {"hours": hours}
|
||||
|
||||
if days == 0:
|
||||
format = _("%(time)s")
|
||||
elif days == 1 and local_date.day == local_yesterday.day and relative:
|
||||
format = _("yesterday") if shorter else _("yesterday at %(time)s")
|
||||
elif days < 5:
|
||||
format = _("%(weekday)s") if shorter else _("%(weekday)s at %(time)s")
|
||||
elif days < 334:  # ~11 months, since a bare date is confusing for the same month last year
|
||||
format = (
|
||||
_("%(month_name)s %(day)s")
|
||||
if shorter
|
||||
else _("%(month_name)s %(day)s at %(time)s")
|
||||
)
|
||||
|
||||
if format is None:
|
||||
format = (
|
||||
_("%(month_name)s %(day)s, %(year)s")
|
||||
if shorter
|
||||
else _("%(month_name)s %(day)s, %(year)s at %(time)s")
|
||||
)
|
||||
|
||||
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
|
||||
if tfhour_clock:
|
||||
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
|
||||
elif self.code == "zh_CN":
|
||||
str_time = "%s%d:%02d" % (
|
||||
(u"\u4e0a\u5348", u"\u4e0b\u5348")[local_date.hour >= 12],
|
||||
local_date.hour % 12 or 12,
|
||||
local_date.minute,
|
||||
)
|
||||
else:
|
||||
str_time = "%d:%02d %s" % (
|
||||
local_date.hour % 12 or 12,
|
||||
local_date.minute,
|
||||
("am", "pm")[local_date.hour >= 12],
|
||||
)
|
||||
|
||||
return format % {
|
||||
"month_name": self._months[local_date.month - 1],
|
||||
"weekday": self._weekdays[local_date.weekday()],
|
||||
"day": str(local_date.day),
|
||||
"year": str(local_date.year),
|
||||
"time": str_time,
|
||||
}
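# Illustrative sketch of the branches above (assumes the default "en_US"
# locale is available; the timestamps are examples):
#
#     import time
#     locale = Locale.get("en_US")
#     locale.format_date(time.time() - 120)            # -> "2 minutes ago"
#     locale.format_date(time.time(), relative=False)  # absolute "%(time)s" form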
|
||||
|
||||
def format_day(
|
||||
self, date: datetime.datetime, gmt_offset: int = 0, dow: bool = True
|
||||
) -> str:
|
||||
"""Formats the given date as a day of week.
|
||||
|
||||
Example: "Monday, January 22". You can remove the day of week with
|
||||
``dow=False``.
|
||||
"""
|
||||
local_date = date - datetime.timedelta(minutes=gmt_offset)
|
||||
_ = self.translate
|
||||
if dow:
|
||||
return _("%(weekday)s, %(month_name)s %(day)s") % {
|
||||
"month_name": self._months[local_date.month - 1],
|
||||
"weekday": self._weekdays[local_date.weekday()],
|
||||
"day": str(local_date.day),
|
||||
}
|
||||
else:
|
||||
return _("%(month_name)s %(day)s") % {
|
||||
"month_name": self._months[local_date.month - 1],
|
||||
"day": str(local_date.day),
|
||||
}
|
||||
|
||||
def list(self, parts: Any) -> str:
|
||||
"""Returns a comma-separated list for the given list of parts.
|
||||
|
||||
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
|
||||
of size 1.
|
||||
"""
|
||||
_ = self.translate
|
||||
if len(parts) == 0:
|
||||
return ""
|
||||
if len(parts) == 1:
|
||||
return parts[0]
|
||||
comma = u" \u0648 " if self.code.startswith("fa") else u", "
|
||||
return _("%(commas)s and %(last)s") % {
|
||||
"commas": comma.join(parts[:-1]),
|
||||
"last": parts[len(parts) - 1],
|
||||
}
|
||||
|
||||
def friendly_number(self, value: int) -> str:
|
||||
"""Returns a comma-separated number for the given integer."""
|
||||
if self.code not in ("en", "en_US"):
|
||||
return str(value)
|
||||
s = str(value)
|
||||
parts = []
|
||||
while s:
|
||||
parts.append(s[-3:])
|
||||
s = s[:-3]
|
||||
return ",".join(reversed(parts))
|
||||
|
||||
|
||||
class CSVLocale(Locale):
|
||||
"""Locale implementation using tornado's CSV translation format."""
|
||||
|
||||
def __init__(self, code: str, translations: Dict[str, Dict[str, str]]) -> None:
|
||||
self.translations = translations
|
||||
super().__init__(code)
|
||||
|
||||
def translate(
|
||||
self,
|
||||
message: str,
|
||||
plural_message: Optional[str] = None,
|
||||
count: Optional[int] = None,
|
||||
) -> str:
|
||||
if plural_message is not None:
|
||||
assert count is not None
|
||||
if count != 1:
|
||||
message = plural_message
|
||||
message_dict = self.translations.get("plural", {})
|
||||
else:
|
||||
message_dict = self.translations.get("singular", {})
|
||||
else:
|
||||
message_dict = self.translations.get("unknown", {})
|
||||
return message_dict.get(message, message)
|
||||
|
||||
def pgettext(
|
||||
self,
|
||||
context: str,
|
||||
message: str,
|
||||
plural_message: Optional[str] = None,
|
||||
count: Optional[int] = None,
|
||||
) -> str:
|
||||
if self.translations:
|
||||
gen_log.warning("pgettext is not supported by CSVLocale")
|
||||
return self.translate(message, plural_message, count)
|
||||
|
||||
|
||||
class GettextLocale(Locale):
|
||||
"""Locale implementation using the `gettext` module."""
|
||||
|
||||
def __init__(self, code: str, translations: gettext.NullTranslations) -> None:
|
||||
self.ngettext = translations.ngettext
|
||||
self.gettext = translations.gettext
|
||||
# self.gettext must exist before __init__ is called, since it
|
||||
# calls into self.translate
|
||||
super().__init__(code)
|
||||
|
||||
def translate(
|
||||
self,
|
||||
message: str,
|
||||
plural_message: Optional[str] = None,
|
||||
count: Optional[int] = None,
|
||||
) -> str:
|
||||
if plural_message is not None:
|
||||
assert count is not None
|
||||
return self.ngettext(message, plural_message, count)
|
||||
else:
|
||||
return self.gettext(message)
|
||||
|
||||
def pgettext(
|
||||
self,
|
||||
context: str,
|
||||
message: str,
|
||||
plural_message: Optional[str] = None,
|
||||
count: Optional[int] = None,
|
||||
) -> str:
|
||||
"""Allows to set context for translation, accepts plural forms.
|
||||
|
||||
Usage example::
|
||||
|
||||
pgettext("law", "right")
|
||||
pgettext("good", "right")
|
||||
|
||||
Plural message example::
|
||||
|
||||
pgettext("organization", "club", "clubs", len(clubs))
|
||||
pgettext("stick", "club", "clubs", len(clubs))
|
||||
|
||||
To generate POT file with context, add following options to step 1
|
||||
of `load_gettext_translations` sequence::
|
||||
|
||||
xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3
|
||||
|
||||
.. versionadded:: 4.2
|
||||
"""
|
||||
if plural_message is not None:
|
||||
assert count is not None
|
||||
msgs_with_ctxt = (
|
||||
"%s%s%s" % (context, CONTEXT_SEPARATOR, message),
|
||||
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message),
|
||||
count,
|
||||
)
|
||||
result = self.ngettext(*msgs_with_ctxt)
|
||||
if CONTEXT_SEPARATOR in result:
|
||||
# Translation not found
|
||||
result = self.ngettext(message, plural_message, count)
|
||||
return result
|
||||
else:
|
||||
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
|
||||
result = self.gettext(msg_with_ctxt)
|
||||
if CONTEXT_SEPARATOR in result:
|
||||
# Translation not found
|
||||
result = message
|
||||
return result
|
571  venv/Lib/site-packages/tornado/locks.py  Normal file
@@ -0,0 +1,571 @@
# Copyright 2015 The Tornado Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import datetime
|
||||
import types
|
||||
|
||||
from tornado import gen, ioloop
|
||||
from tornado.concurrent import Future, future_set_result_unless_cancelled
|
||||
|
||||
from typing import Union, Optional, Type, Any, Awaitable
|
||||
import typing
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Deque, Set # noqa: F401
|
||||
|
||||
__all__ = ["Condition", "Event", "Semaphore", "BoundedSemaphore", "Lock"]
|
||||
|
||||
|
||||
class _TimeoutGarbageCollector(object):
|
||||
"""Base class for objects that periodically clean up timed-out waiters.
|
||||
|
||||
Avoids memory leak in a common pattern like:
|
||||
|
||||
while True:
|
||||
yield condition.wait(short_timeout)
|
||||
print('looping....')
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._waiters = collections.deque() # type: Deque[Future]
|
||||
self._timeouts = 0
|
||||
|
||||
def _garbage_collect(self) -> None:
|
||||
# Occasionally clear timed-out waiters.
|
||||
self._timeouts += 1
|
||||
if self._timeouts > 100:
|
||||
self._timeouts = 0
|
||||
self._waiters = collections.deque(w for w in self._waiters if not w.done())
|
||||
|
||||
|
||||
class Condition(_TimeoutGarbageCollector):
|
||||
"""A condition allows one or more coroutines to wait until notified.
|
||||
|
||||
Like a standard `threading.Condition`, but does not need an underlying lock
|
||||
that is acquired and released.
|
||||
|
||||
With a `Condition`, coroutines can wait to be notified by other coroutines:
|
||||
|
||||
.. testcode::
|
||||
|
||||
from tornado import gen
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.locks import Condition
|
||||
|
||||
condition = Condition()
|
||||
|
||||
async def waiter():
|
||||
print("I'll wait right here")
|
||||
await condition.wait()
|
||||
print("I'm done waiting")
|
||||
|
||||
async def notifier():
|
||||
print("About to notify")
|
||||
condition.notify()
|
||||
print("Done notifying")
|
||||
|
||||
async def runner():
|
||||
# Wait for waiter() and notifier() in parallel
|
||||
await gen.multi([waiter(), notifier()])
|
||||
|
||||
IOLoop.current().run_sync(runner)
|
||||
|
||||
.. testoutput::
|
||||
|
||||
I'll wait right here
|
||||
About to notify
|
||||
Done notifying
|
||||
I'm done waiting
|
||||
|
||||
`wait` takes an optional ``timeout`` argument, which is either an absolute
|
||||
timestamp::
|
||||
|
||||
io_loop = IOLoop.current()
|
||||
|
||||
# Wait up to 1 second for a notification.
|
||||
await condition.wait(timeout=io_loop.time() + 1)
|
||||
|
||||
...or a `datetime.timedelta` for a timeout relative to the current time::
|
||||
|
||||
# Wait up to 1 second.
|
||||
await condition.wait(timeout=datetime.timedelta(seconds=1))
|
||||
|
||||
The method returns False if there's no notification before the deadline.
|
||||
|
||||
.. versionchanged:: 5.0
|
||||
Previously, waiters could be notified synchronously from within
|
||||
`notify`. Now, the notification will always be received on the
|
||||
next iteration of the `.IOLoop`.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.io_loop = ioloop.IOLoop.current()
|
||||
|
||||
def __repr__(self) -> str:
|
||||
result = "<%s" % (self.__class__.__name__,)
|
||||
if self._waiters:
|
||||
result += " waiters[%s]" % len(self._waiters)
|
||||
return result + ">"
|
||||
|
||||
def wait(
|
||||
self, timeout: Optional[Union[float, datetime.timedelta]] = None
|
||||
) -> Awaitable[bool]:
|
||||
"""Wait for `.notify`.
|
||||
|
||||
Returns a `.Future` that resolves ``True`` if the condition is notified,
|
||||
or ``False`` after a timeout.
|
||||
"""
|
||||
waiter = Future() # type: Future[bool]
|
||||
self._waiters.append(waiter)
|
||||
if timeout:
|
||||
|
||||
def on_timeout() -> None:
|
||||
if not waiter.done():
|
||||
future_set_result_unless_cancelled(waiter, False)
|
||||
self._garbage_collect()
|
||||
|
||||
io_loop = ioloop.IOLoop.current()
|
||||
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
|
||||
waiter.add_done_callback(lambda _: io_loop.remove_timeout(timeout_handle))
|
||||
return waiter
|
||||
|
||||
def notify(self, n: int = 1) -> None:
|
||||
"""Wake ``n`` waiters."""
|
||||
waiters = [] # Waiters we plan to run right now.
|
||||
while n and self._waiters:
|
||||
waiter = self._waiters.popleft()
|
||||
if not waiter.done(): # Might have timed out.
|
||||
n -= 1
|
||||
waiters.append(waiter)
|
||||
|
||||
for waiter in waiters:
|
||||
future_set_result_unless_cancelled(waiter, True)
|
||||
|
||||
def notify_all(self) -> None:
|
||||
"""Wake all waiters."""
|
||||
self.notify(len(self._waiters))
|
||||
|
||||
|
||||
class Event(object):
|
||||
"""An event blocks coroutines until its internal flag is set to True.
|
||||
|
||||
Similar to `threading.Event`.
|
||||
|
||||
A coroutine can wait for an event to be set. Once it is set, calls to
|
||||
``yield event.wait()`` will not block unless the event has been cleared:
|
||||
|
||||
.. testcode::
|
||||
|
||||
from tornado import gen
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.locks import Event
|
||||
|
||||
event = Event()
|
||||
|
||||
async def waiter():
|
||||
print("Waiting for event")
|
||||
await event.wait()
|
||||
print("Not waiting this time")
|
||||
await event.wait()
|
||||
print("Done")
|
||||
|
||||
async def setter():
|
||||
print("About to set the event")
|
||||
event.set()
|
||||
|
||||
async def runner():
|
||||
await gen.multi([waiter(), setter()])
|
||||
|
||||
IOLoop.current().run_sync(runner)
|
||||
|
||||
.. testoutput::
|
||||
|
||||
Waiting for event
|
||||
About to set the event
|
||||
Not waiting this time
|
||||
Done
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._value = False
|
||||
self._waiters = set() # type: Set[Future[None]]
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<%s %s>" % (
|
||||
self.__class__.__name__,
|
||||
"set" if self.is_set() else "clear",
|
||||
)
|
||||
|
||||
def is_set(self) -> bool:
|
||||
"""Return ``True`` if the internal flag is true."""
|
||||
return self._value
|
||||
|
||||
def set(self) -> None:
|
||||
"""Set the internal flag to ``True``. All waiters are awakened.
|
||||
|
||||
Calling `.wait` once the flag is set will not block.
|
||||
"""
|
||||
if not self._value:
|
||||
self._value = True
|
||||
|
||||
for fut in self._waiters:
|
||||
if not fut.done():
|
||||
fut.set_result(None)
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Reset the internal flag to ``False``.
|
||||
|
||||
Calls to `.wait` will block until `.set` is called.
|
||||
"""
|
||||
self._value = False
|
||||
|
||||
def wait(
|
||||
self, timeout: Optional[Union[float, datetime.timedelta]] = None
|
||||
) -> Awaitable[None]:
|
||||
"""Block until the internal flag is true.
|
||||
|
||||
Returns an awaitable, which raises `tornado.util.TimeoutError` after a
|
||||
timeout.
|
||||
"""
|
||||
fut = Future() # type: Future[None]
|
||||
if self._value:
|
||||
fut.set_result(None)
|
||||
return fut
|
||||
self._waiters.add(fut)
|
||||
fut.add_done_callback(lambda fut: self._waiters.remove(fut))
|
||||
if timeout is None:
|
||||
return fut
|
||||
else:
|
||||
timeout_fut = gen.with_timeout(timeout, fut)
|
||||
# This is a slightly clumsy workaround for the fact that
|
||||
# gen.with_timeout doesn't cancel its futures. Cancelling
|
||||
# fut will remove it from the waiters list.
|
||||
timeout_fut.add_done_callback(
|
||||
lambda tf: fut.cancel() if not fut.done() else None
|
||||
)
|
||||
return timeout_fut
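# Illustrative sketch: waiting with a deadline. Per the docstring above, the
# returned awaitable raises tornado.util.TimeoutError if the flag stays unset:
#
#     import datetime
#     from tornado.util import TimeoutError as TornadoTimeoutError
#     try:
#         await event.wait(timeout=datetime.timedelta(seconds=1))
#     except TornadoTimeoutError:
#         pass  # not set within one second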
|
||||
|
||||
|
||||
class _ReleasingContextManager(object):
|
||||
"""Releases a Lock or Semaphore at the end of a "with" statement.
|
||||
|
||||
with (yield semaphore.acquire()):
|
||||
pass
|
||||
|
||||
# Now semaphore.release() has been called.
|
||||
"""
|
||||
|
||||
def __init__(self, obj: Any) -> None:
|
||||
self._obj = obj
|
||||
|
||||
def __enter__(self) -> None:
|
||||
pass
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: "Optional[Type[BaseException]]",
|
||||
exc_val: Optional[BaseException],
|
||||
exc_tb: Optional[types.TracebackType],
|
||||
) -> None:
|
||||
self._obj.release()
|
||||
|
||||
|
||||
class Semaphore(_TimeoutGarbageCollector):
|
||||
"""A lock that can be acquired a fixed number of times before blocking.
|
||||
|
||||
A Semaphore manages a counter representing the number of `.release` calls
|
||||
minus the number of `.acquire` calls, plus an initial value. The `.acquire`
|
||||
method blocks if necessary until it can return without making the counter
|
||||
negative.
|
||||
|
||||
Semaphores limit access to a shared resource. To allow access for two
|
||||
workers at a time:
|
||||
|
||||
.. testsetup:: semaphore
|
||||
|
||||
from collections import deque
|
||||
|
||||
from tornado import gen
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.concurrent import Future
|
||||
|
||||
# Ensure reliable doctest output: resolve Futures one at a time.
|
||||
futures_q = deque([Future() for _ in range(3)])
|
||||
|
||||
async def simulator(futures):
|
||||
for f in futures:
|
||||
# simulate the asynchronous passage of time
|
||||
await gen.sleep(0)
|
||||
await gen.sleep(0)
|
||||
f.set_result(None)
|
||||
|
||||
IOLoop.current().add_callback(simulator, list(futures_q))
|
||||
|
||||
def use_some_resource():
|
||||
return futures_q.popleft()
|
||||
|
||||
.. testcode:: semaphore
|
||||
|
||||
from tornado import gen
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.locks import Semaphore
|
||||
|
||||
sem = Semaphore(2)
|
||||
|
||||
async def worker(worker_id):
|
||||
await sem.acquire()
|
||||
try:
|
||||
print("Worker %d is working" % worker_id)
|
||||
await use_some_resource()
|
||||
finally:
|
||||
print("Worker %d is done" % worker_id)
|
||||
sem.release()
|
||||
|
||||
async def runner():
|
||||
# Join all workers.
|
||||
await gen.multi([worker(i) for i in range(3)])
|
||||
|
||||
IOLoop.current().run_sync(runner)
|
||||
|
||||
.. testoutput:: semaphore
|
||||
|
||||
Worker 0 is working
|
||||
Worker 1 is working
|
||||
Worker 0 is done
|
||||
Worker 2 is working
|
||||
Worker 1 is done
|
||||
Worker 2 is done
|
||||
|
||||
Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until
|
||||
the semaphore has been released once, by worker 0.
|
||||
|
||||
The semaphore can be used as an async context manager::
|
||||
|
||||
async def worker(worker_id):
|
||||
async with sem:
|
||||
print("Worker %d is working" % worker_id)
|
||||
await use_some_resource()
|
||||
|
||||
# Now the semaphore has been released.
|
||||
print("Worker %d is done" % worker_id)
|
||||
|
||||
For compatibility with older versions of Python, `.acquire` is a
|
||||
context manager, so ``worker`` could also be written as::
|
||||
|
||||
@gen.coroutine
|
||||
def worker(worker_id):
|
||||
with (yield sem.acquire()):
|
||||
print("Worker %d is working" % worker_id)
|
||||
yield use_some_resource()
|
||||
|
||||
# Now the semaphore has been released.
|
||||
print("Worker %d is done" % worker_id)
|
||||
|
||||
.. versionchanged:: 4.3
|
||||
Added ``async with`` support in Python 3.5.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, value: int = 1) -> None:
|
||||
super().__init__()
|
||||
if value < 0:
|
||||
raise ValueError("semaphore initial value must be >= 0")
|
||||
|
||||
self._value = value
|
||||
|
||||
def __repr__(self) -> str:
|
||||
res = super().__repr__()
|
||||
extra = (
|
||||
"locked" if self._value == 0 else "unlocked,value:{0}".format(self._value)
|
||||
)
|
||||
if self._waiters:
|
||||
extra = "{0},waiters:{1}".format(extra, len(self._waiters))
|
||||
return "<{0} [{1}]>".format(res[1:-1], extra)
|
||||
|
||||
def release(self) -> None:
|
||||
"""Increment the counter and wake one waiter."""
|
||||
self._value += 1
|
||||
while self._waiters:
|
||||
waiter = self._waiters.popleft()
|
||||
if not waiter.done():
|
||||
self._value -= 1
|
||||
|
||||
# If the waiter is a coroutine paused at
|
||||
#
|
||||
# with (yield semaphore.acquire()):
|
||||
#
|
||||
# then the context manager's __exit__ calls release() at the end
|
||||
# of the "with" block.
|
||||
waiter.set_result(_ReleasingContextManager(self))
|
||||
break
|
||||
|
||||
def acquire(
|
||||
self, timeout: Optional[Union[float, datetime.timedelta]] = None
|
||||
) -> Awaitable[_ReleasingContextManager]:
|
||||
"""Decrement the counter. Returns an awaitable.
|
||||
|
||||
Block if the counter is zero and wait for a `.release`. The awaitable
|
||||
raises `.TimeoutError` after the deadline.
|
||||
"""
|
||||
waiter = Future() # type: Future[_ReleasingContextManager]
|
||||
if self._value > 0:
|
||||
self._value -= 1
|
||||
waiter.set_result(_ReleasingContextManager(self))
|
||||
else:
|
||||
self._waiters.append(waiter)
|
||||
if timeout:
|
||||
|
||||
def on_timeout() -> None:
|
||||
if not waiter.done():
|
||||
waiter.set_exception(gen.TimeoutError())
|
||||
self._garbage_collect()
|
||||
|
||||
io_loop = ioloop.IOLoop.current()
|
||||
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
|
||||
waiter.add_done_callback(
|
||||
lambda _: io_loop.remove_timeout(timeout_handle)
|
||||
)
|
||||
return waiter
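# Illustrative sketch: acquiring with a deadline (gen.TimeoutError is raised
# by the on_timeout callback above if no slot frees up in time):
#
#     import datetime
#     try:
#         await sem.acquire(timeout=datetime.timedelta(seconds=1))
#     except gen.TimeoutError:
#         pass  # semaphore still busy
#     else:
#         try:
#             pass  # use the shared resource
#         finally:
#             sem.release()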
|
||||
|
||||
def __enter__(self) -> None:
|
||||
raise RuntimeError("Use 'async with' instead of 'with' for Semaphore")
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
typ: "Optional[Type[BaseException]]",
|
||||
value: Optional[BaseException],
|
||||
traceback: Optional[types.TracebackType],
|
||||
) -> None:
|
||||
self.__enter__()
|
||||
|
||||
async def __aenter__(self) -> None:
|
||||
await self.acquire()
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
typ: "Optional[Type[BaseException]]",
|
||||
value: Optional[BaseException],
|
||||
tb: Optional[types.TracebackType],
|
||||
) -> None:
|
||||
self.release()
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""A semaphore that prevents release() being called too many times.
|
||||
|
||||
If `.release` would increment the semaphore's value past the initial
|
||||
value, it raises `ValueError`. Semaphores are mostly used to guard
|
||||
resources with limited capacity, so a semaphore released too many times
|
||||
is a sign of a bug.
|
||||
"""
|
||||
|
||||
def __init__(self, value: int = 1) -> None:
|
||||
super().__init__(value=value)
|
||||
self._initial_value = value
|
||||
|
||||
def release(self) -> None:
|
||||
"""Increment the counter and wake one waiter."""
|
||||
if self._value >= self._initial_value:
|
||||
raise ValueError("Semaphore released too many times")
|
||||
super().release()
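# For example, with BoundedSemaphore(1), a second release() without an
# intervening acquire() raises ValueError rather than silently growing
# the counter past its initial value.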
|
||||
|
||||
|
||||
class Lock(object):
|
||||
"""A lock for coroutines.
|
||||
|
||||
A Lock begins unlocked, and `acquire` locks it immediately. While it is
|
||||
locked, a coroutine that yields `acquire` waits until another coroutine
|
||||
calls `release`.
|
||||
|
||||
Releasing an unlocked lock raises `RuntimeError`.
|
||||
|
||||
A Lock can be used as an async context manager with the ``async
|
||||
with`` statement:
|
||||
|
||||
>>> from tornado import locks
|
||||
>>> lock = locks.Lock()
|
||||
>>>
|
||||
>>> async def f():
|
||||
... async with lock:
|
||||
... # Do something holding the lock.
|
||||
... pass
|
||||
...
|
||||
... # Now the lock is released.
|
||||
|
||||
For compatibility with older versions of Python, the `.acquire`
|
||||
method asynchronously returns a regular context manager:
|
||||
|
||||
>>> async def f2():
|
||||
... with (yield lock.acquire()):
|
||||
... # Do something holding the lock.
|
||||
... pass
|
||||
...
|
||||
... # Now the lock is released.
|
||||
|
||||
.. versionchanged:: 4.3
|
||||
Added ``async with`` support in Python 3.5.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._block = BoundedSemaphore(value=1)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<%s _block=%s>" % (self.__class__.__name__, self._block)
|
||||
|
||||
def acquire(
|
||||
self, timeout: Optional[Union[float, datetime.timedelta]] = None
|
||||
) -> Awaitable[_ReleasingContextManager]:
|
||||
"""Attempt to lock. Returns an awaitable.
|
||||
|
||||
Returns an awaitable, which raises `tornado.util.TimeoutError` after a
|
||||
timeout.
|
||||
"""
|
||||
return self._block.acquire(timeout)
|
||||
|
||||
def release(self) -> None:
|
||||
"""Unlock.
|
||||
|
||||
The first coroutine in line waiting for `acquire` gets the lock.
|
||||
|
||||
If not locked, raise a `RuntimeError`.
|
||||
"""
|
||||
try:
|
||||
self._block.release()
|
||||
except ValueError:
|
||||
raise RuntimeError("release unlocked lock")
|
||||
|
||||
def __enter__(self) -> None:
|
||||
raise RuntimeError("Use `async with` instead of `with` for Lock")
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
typ: "Optional[Type[BaseException]]",
|
||||
value: Optional[BaseException],
|
||||
tb: Optional[types.TracebackType],
|
||||
) -> None:
|
||||
self.__enter__()
|
||||
|
||||
async def __aenter__(self) -> None:
|
||||
await self.acquire()
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
typ: "Optional[Type[BaseException]]",
|
||||
value: Optional[BaseException],
|
||||
tb: Optional[types.TracebackType],
|
||||
) -> None:
|
||||
self.release()
|
339  venv/Lib/site-packages/tornado/log.py  Normal file
@@ -0,0 +1,339 @@
#
|
||||
# Copyright 2012 Facebook
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""Logging support for Tornado.
|
||||
|
||||
Tornado uses three logger streams:
|
||||
|
||||
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
|
||||
potentially other servers in the future)
|
||||
* ``tornado.application``: Logging of errors from application code (i.e.
|
||||
uncaught exceptions from callbacks)
|
||||
* ``tornado.general``: General-purpose logging, including any errors
|
||||
or warnings from Tornado itself.
|
||||
|
||||
These streams may be configured independently using the standard library's
|
||||
`logging` module. For example, you may wish to send ``tornado.access`` logs
|
||||
to a separate file for analysis.
|
||||
"""
|
||||
import logging
|
||||
import logging.handlers
|
||||
import sys
|
||||
|
||||
from tornado.escape import _unicode
|
||||
from tornado.util import unicode_type, basestring_type
|
||||
|
||||
try:
|
||||
import colorama # type: ignore
|
||||
except ImportError:
|
||||
colorama = None
|
||||
|
||||
try:
|
||||
import curses
|
||||
except ImportError:
|
||||
curses = None # type: ignore
|
||||
|
||||
from typing import Dict, Any, cast, Optional
|
||||
|
||||
# Logger objects for internal tornado use
|
||||
access_log = logging.getLogger("tornado.access")
|
||||
app_log = logging.getLogger("tornado.application")
|
||||
gen_log = logging.getLogger("tornado.general")
|
||||
|
||||
|
||||
def _stderr_supports_color() -> bool:
|
||||
try:
|
||||
if hasattr(sys.stderr, "isatty") and sys.stderr.isatty():
|
||||
if curses:
|
||||
curses.setupterm()
|
||||
if curses.tigetnum("colors") > 0:
|
||||
return True
|
||||
elif colorama:
|
||||
if sys.stderr is getattr(
|
||||
colorama.initialise, "wrapped_stderr", object()
|
||||
):
|
||||
return True
|
||||
except Exception:
|
||||
# Very broad exception handling because it's always better to
|
||||
# fall back to non-colored logs than to break at startup.
|
||||
pass
|
||||
return False
|
||||
|
||||
|
||||
def _safe_unicode(s: Any) -> str:
|
||||
try:
|
||||
return _unicode(s)
|
||||
except UnicodeDecodeError:
|
||||
return repr(s)
|
||||
|
||||
|
||||
class LogFormatter(logging.Formatter):
|
||||
"""Log formatter used in Tornado.
|
||||
|
||||
Key features of this formatter are:
|
||||
|
||||
* Color support when logging to a terminal that supports it.
|
||||
* Timestamps on every log line.
|
||||
* Robust against str/bytes encoding problems.
|
||||
|
||||
This formatter is enabled automatically by
|
||||
`tornado.options.parse_command_line` or `tornado.options.parse_config_file`
|
||||
(unless ``--logging=none`` is used).
|
||||
|
||||
Color support on Windows versions that do not support ANSI color codes is
|
||||
enabled by use of the colorama__ library. Applications that wish to use
|
||||
this must first initialize colorama with a call to ``colorama.init``.
|
||||
See the colorama documentation for details.
|
||||
|
||||
__ https://pypi.python.org/pypi/colorama
|
||||
|
||||
.. versionchanged:: 4.5
|
||||
Added support for ``colorama``. Changed the constructor
|
||||
signature to be compatible with `logging.config.dictConfig`.
|
||||
"""
|
||||
|
||||
DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s" # noqa: E501
|
||||
DEFAULT_DATE_FORMAT = "%y%m%d %H:%M:%S"
|
||||
DEFAULT_COLORS = {
|
||||
logging.DEBUG: 4, # Blue
|
||||
logging.INFO: 2, # Green
|
||||
logging.WARNING: 3, # Yellow
|
||||
logging.ERROR: 1, # Red
|
||||
logging.CRITICAL: 5, # Magenta
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
fmt: str = DEFAULT_FORMAT,
|
||||
datefmt: str = DEFAULT_DATE_FORMAT,
|
||||
style: str = "%",
|
||||
color: bool = True,
|
||||
colors: Dict[int, int] = DEFAULT_COLORS,
|
||||
) -> None:
|
||||
r"""
|
||||
:arg bool color: Enables color support.
|
||||
:arg str fmt: Log message format.
|
||||
It will be applied to the attributes dict of log records. The
|
||||
text between ``%(color)s`` and ``%(end_color)s`` will be colored
|
||||
depending on the level if color support is on.
|
||||
:arg dict colors: color mappings from logging level to terminal color
|
||||
code
|
||||
:arg str datefmt: Datetime format.
|
||||
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
|
||||
|
||||
.. versionchanged:: 3.2
|
||||
|
||||
Added ``fmt`` and ``datefmt`` arguments.
|
||||
"""
|
||||
logging.Formatter.__init__(self, datefmt=datefmt)
|
||||
self._fmt = fmt
|
||||
|
||||
self._colors = {} # type: Dict[int, str]
|
||||
if color and _stderr_supports_color():
|
||||
if curses is not None:
|
||||
fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or b""
|
||||
|
||||
for levelno, code in colors.items():
|
||||
# Convert the terminal control characters from
|
||||
# bytes to unicode strings for easier use with the
|
||||
# logging module.
|
||||
self._colors[levelno] = unicode_type(
|
||||
curses.tparm(fg_color, code), "ascii"
|
||||
)
|
||||
self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
|
||||
else:
|
||||
# If curses is not present (currently we'll only get here for
|
||||
# colorama on windows), assume hard-coded ANSI color codes.
|
||||
for levelno, code in colors.items():
|
||||
self._colors[levelno] = "\033[2;3%dm" % code
|
||||
self._normal = "\033[0m"
|
||||
else:
|
||||
self._normal = ""
|
||||
|
||||
def format(self, record: Any) -> str:
|
||||
try:
|
||||
message = record.getMessage()
|
||||
assert isinstance(message, basestring_type) # guaranteed by logging
|
||||
# Encoding notes: The logging module prefers to work with character
|
||||
# strings, but only enforces that log messages are instances of
|
||||
# basestring. In python 2, non-ascii bytestrings will make
|
||||
# their way through the logging framework until they blow up with
|
||||
# an unhelpful decoding error (with this formatter it happens
|
||||
# when we attach the prefix, but there are other opportunities for
|
||||
# exceptions further along in the framework).
|
||||
#
|
||||
# If a byte string makes it this far, convert it to unicode to
|
||||
# ensure it will make it out to the logs. Use repr() as a fallback
|
||||
# to ensure that all byte strings can be converted successfully,
|
||||
# but don't do it by default so we don't add extra quotes to ascii
|
||||
# bytestrings. This is a bit of a hacky place to do this, but
|
||||
# it's worth it since the encoding errors that would otherwise
|
||||
# result are so useless (and tornado is fond of using utf8-encoded
|
||||
# byte strings wherever possible).
|
||||
record.message = _safe_unicode(message)
|
||||
except Exception as e:
|
||||
record.message = "Bad message (%r): %r" % (e, record.__dict__)
|
||||
|
||||
record.asctime = self.formatTime(record, cast(str, self.datefmt))
|
||||
|
||||
if record.levelno in self._colors:
|
||||
record.color = self._colors[record.levelno]
|
||||
record.end_color = self._normal
|
||||
else:
|
||||
record.color = record.end_color = ""
|
||||
|
||||
formatted = self._fmt % record.__dict__
|
||||
|
||||
if record.exc_info:
|
||||
if not record.exc_text:
|
||||
record.exc_text = self.formatException(record.exc_info)
|
||||
if record.exc_text:
|
||||
# exc_text contains multiple lines. We need to _safe_unicode
|
||||
# each line separately so that non-utf8 bytes don't cause
|
||||
# all the newlines to turn into '\n'.
|
||||
lines = [formatted.rstrip()]
|
||||
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split("\n"))
|
||||
formatted = "\n".join(lines)
|
||||
return formatted.replace("\n", "\n ")
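# Illustrative sketch: attaching this formatter by hand (most applications
# get it automatically via tornado.options.parse_command_line):
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(LogFormatter(color=False))
#     logging.getLogger("tornado.access").addHandler(handler)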
|
||||
|
||||
|
||||
def enable_pretty_logging(
|
||||
options: Any = None, logger: Optional[logging.Logger] = None
|
||||
) -> None:
|
||||
"""Turns on formatted logging output as configured.
|
||||
|
||||
This is called automatically by `tornado.options.parse_command_line`
|
||||
and `tornado.options.parse_config_file`.
|
||||
"""
|
||||
if options is None:
|
||||
import tornado.options
|
||||
|
||||
options = tornado.options.options
|
||||
if options.logging is None or options.logging.lower() == "none":
|
||||
return
|
||||
if logger is None:
|
||||
logger = logging.getLogger()
|
||||
logger.setLevel(getattr(logging, options.logging.upper()))
|
||||
if options.log_file_prefix:
|
||||
rotate_mode = options.log_rotate_mode
|
||||
if rotate_mode == "size":
|
||||
channel = logging.handlers.RotatingFileHandler(
|
||||
filename=options.log_file_prefix,
|
||||
maxBytes=options.log_file_max_size,
|
||||
backupCount=options.log_file_num_backups,
|
||||
encoding="utf-8",
|
||||
) # type: logging.Handler
|
||||
elif rotate_mode == "time":
|
||||
channel = logging.handlers.TimedRotatingFileHandler(
|
||||
filename=options.log_file_prefix,
|
||||
when=options.log_rotate_when,
|
||||
interval=options.log_rotate_interval,
|
||||
backupCount=options.log_file_num_backups,
|
||||
encoding="utf-8",
|
||||
)
|
||||
else:
|
||||
error_message = (
|
||||
"The value of log_rotate_mode option should be "
|
||||
+ '"size" or "time", not "%s".' % rotate_mode
|
||||
)
|
||||
raise ValueError(error_message)
|
||||
channel.setFormatter(LogFormatter(color=False))
|
||||
logger.addHandler(channel)
|
||||
|
||||
if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers):
|
||||
# Set up color if we are in a tty and curses is installed
|
||||
channel = logging.StreamHandler()
|
||||
channel.setFormatter(LogFormatter())
|
||||
logger.addHandler(channel)
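# Illustrative sketch: formatting a single logger instead of the root logger
# (the global tornado options are used because ``options`` is left as None):
#
#     access = logging.getLogger("tornado.access")
#     enable_pretty_logging(logger=access)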
|
||||
|
||||
|
||||
def define_logging_options(options: Any = None) -> None:
|
||||
"""Add logging-related flags to ``options``.
|
||||
|
||||
These options are present automatically on the default options instance;
|
||||
this method is only necessary if you have created your own `.OptionParser`.
|
||||
|
||||
.. versionadded:: 4.2
|
||||
This function existed in prior versions but was broken and undocumented until 4.2.
|
||||
"""
|
||||
if options is None:
|
||||
# late import to prevent cycle
|
||||
import tornado.options
|
||||
|
||||
options = tornado.options.options
|
||||
options.define(
|
||||
"logging",
|
||||
default="info",
|
||||
help=(
|
||||
"Set the Python log level. If 'none', tornado won't touch the "
|
||||
"logging configuration."
|
||||
),
|
||||
metavar="debug|info|warning|error|none",
|
||||
)
|
||||
options.define(
|
||||
"log_to_stderr",
|
||||
type=bool,
|
||||
default=None,
|
||||
help=(
|
||||
"Send log output to stderr (colorized if possible). "
|
||||
"By default use stderr if --log_file_prefix is not set and "
|
||||
"no other logging is configured."
|
||||
),
|
||||
)
|
||||
options.define(
|
||||
"log_file_prefix",
|
||||
type=str,
|
||||
default=None,
|
||||
metavar="PATH",
|
||||
help=(
|
||||
"Path prefix for log files. "
|
||||
"Note that if you are running multiple tornado processes, "
|
||||
"log_file_prefix must be different for each of them (e.g. "
|
||||
"include the port number)"
|
||||
),
|
||||
)
|
||||
options.define(
|
||||
"log_file_max_size",
|
||||
type=int,
|
||||
default=100 * 1000 * 1000,
|
||||
help="max size of log files before rollover",
|
||||
)
|
||||
options.define(
|
||||
"log_file_num_backups", type=int, default=10, help="number of log files to keep"
|
||||
)
|
||||
|
||||
options.define(
|
||||
"log_rotate_when",
|
||||
type=str,
|
||||
default="midnight",
|
||||
help=(
|
||||
"specify the type of TimedRotatingFileHandler interval "
|
||||
"other options:('S', 'M', 'H', 'D', 'W0'-'W6')"
|
||||
),
|
||||
)
|
||||
options.define(
|
||||
"log_rotate_interval",
|
||||
type=int,
|
||||
default=1,
|
||||
help="The interval value of timed rotating",
|
||||
)
|
||||
|
||||
options.define(
|
||||
"log_rotate_mode",
|
||||
type=str,
|
||||
default="size",
|
||||
help="The mode of rotating files(time or size)",
|
||||
)
|
||||
|
||||
options.add_parse_callback(lambda: enable_pretty_logging(options))
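# Illustrative sketch for a standalone parser (the default global options
# instance already carries these flags):
#
#     import tornado.options
#     parser = tornado.options.OptionParser()
#     define_logging_options(parser)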
|
617  venv/Lib/site-packages/tornado/netutil.py  Normal file
@@ -0,0 +1,617 @@
#
|
||||
# Copyright 2011 Facebook
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Miscellaneous network utility code."""
|
||||
|
||||
import concurrent.futures
|
||||
import errno
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import ssl
|
||||
import stat
|
||||
|
||||
from tornado.concurrent import dummy_executor, run_on_executor
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado.util import Configurable, errno_from_exception
|
||||
|
||||
from typing import List, Callable, Any, Type, Dict, Union, Tuple, Awaitable, Optional
|
||||
|
||||
# Note that the naming of ssl.Purpose is confusing; the purpose
|
||||
# of a context is to authenticate the opposite side of the connection.
|
||||
_client_ssl_defaults = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
|
||||
_server_ssl_defaults = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
|
||||
if hasattr(ssl, "OP_NO_COMPRESSION"):
|
||||
# See netutil.ssl_options_to_context
|
||||
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
|
||||
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
|
||||
|
||||
# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
|
||||
# getaddrinfo attempts to import encodings.idna. If this is done at
|
||||
# module-import time, the import lock is already held by the main thread,
|
||||
# leading to deadlock. Avoid it by caching the idna encoder on the main
|
||||
# thread now.
|
||||
u"foo".encode("idna")
|
||||
|
||||
# For undiagnosed reasons, 'latin1' codec may also need to be preloaded.
|
||||
u"foo".encode("latin1")
|
||||
|
||||
# Default backlog used when calling sock.listen()
|
||||
_DEFAULT_BACKLOG = 128
|
||||
|
||||
|
||||
def bind_sockets(
|
||||
port: int,
|
||||
address: Optional[str] = None,
|
||||
family: socket.AddressFamily = socket.AF_UNSPEC,
|
||||
backlog: int = _DEFAULT_BACKLOG,
|
||||
flags: Optional[int] = None,
|
||||
reuse_port: bool = False,
|
||||
) -> List[socket.socket]:
|
||||
"""Creates listening sockets bound to the given port and address.
|
||||
|
||||
Returns a list of socket objects (multiple sockets are returned if
|
||||
the given address maps to multiple IP addresses, which is most common
|
||||
for mixed IPv4 and IPv6 use).
|
||||
|
||||
Address may be either an IP address or hostname. If it's a hostname,
|
||||
the server will listen on all IP addresses associated with the
|
||||
name. Address may be an empty string or None to listen on all
|
||||
available interfaces. Family may be set to either `socket.AF_INET`
|
||||
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
|
||||
both will be used if available.
|
||||
|
||||
The ``backlog`` argument has the same meaning as for
|
||||
`socket.listen() <socket.socket.listen>`.
|
||||
|
||||
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
|
||||
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
|
||||
|
||||
The ``reuse_port`` option sets ``SO_REUSEPORT`` for every socket
in the list. If your platform doesn't support this option, ``ValueError``
will be raised.
|
||||
"""
|
||||
if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
|
||||
raise ValueError("the platform doesn't support SO_REUSEPORT")
|
||||
|
||||
sockets = []
|
||||
if address == "":
|
||||
address = None
|
||||
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
|
||||
# Python can be compiled with --disable-ipv6, which causes
|
||||
# operations on AF_INET6 sockets to fail, but does not
|
||||
# automatically exclude those results from getaddrinfo
|
||||
# results.
|
||||
# http://bugs.python.org/issue16208
|
||||
family = socket.AF_INET
|
||||
if flags is None:
|
||||
flags = socket.AI_PASSIVE
|
||||
bound_port = None
|
||||
unique_addresses = set() # type: set
|
||||
for res in sorted(
|
||||
socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
|
||||
key=lambda x: x[0],
|
||||
):
|
||||
if res in unique_addresses:
|
||||
continue
|
||||
|
||||
unique_addresses.add(res)
|
||||
|
||||
af, socktype, proto, canonname, sockaddr = res
|
||||
if (
|
||||
sys.platform == "darwin"
|
||||
and address == "localhost"
|
||||
and af == socket.AF_INET6
|
||||
and sockaddr[3] != 0
|
||||
):
|
||||
# Mac OS X includes a link-local address fe80::1%lo0 in the
|
||||
# getaddrinfo results for 'localhost'. However, the firewall
|
||||
# doesn't understand that this is a local address and will
|
||||
# prompt for access (often repeatedly, due to an apparent
|
||||
# bug in its ability to remember granting access to an
|
||||
# application). Skip these addresses.
|
||||
continue
|
||||
try:
|
||||
sock = socket.socket(af, socktype, proto)
|
||||
except socket.error as e:
|
||||
if errno_from_exception(e) == errno.EAFNOSUPPORT:
|
||||
continue
|
||||
raise
|
||||
if os.name != "nt":
|
||||
try:
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
except socket.error as e:
|
||||
if errno_from_exception(e) != errno.ENOPROTOOPT:
|
||||
# Hurd doesn't support SO_REUSEADDR.
|
||||
raise
|
||||
if reuse_port:
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
||||
if af == socket.AF_INET6:
|
||||
# On linux, ipv6 sockets accept ipv4 too by default,
|
||||
# but this makes it impossible to bind to both
|
||||
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
|
||||
# separate sockets *must* be used to listen for both ipv4
|
||||
# and ipv6. For consistency, always disable ipv4 on our
|
||||
# ipv6 sockets and use a separate ipv4 socket when needed.
|
||||
#
|
||||
# Python 2.x on windows doesn't have IPPROTO_IPV6.
|
||||
if hasattr(socket, "IPPROTO_IPV6"):
|
||||
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
|
||||
|
||||
# automatic port allocation with port=None
|
||||
# should bind on the same port on IPv4 and IPv6
|
||||
host, requested_port = sockaddr[:2]
|
||||
if requested_port == 0 and bound_port is not None:
|
||||
sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
|
||||
|
||||
sock.setblocking(False)
|
||||
try:
|
||||
sock.bind(sockaddr)
|
||||
except OSError as e:
|
||||
if (
|
||||
errno_from_exception(e) == errno.EADDRNOTAVAIL
|
||||
and address == "localhost"
|
||||
and sockaddr[0] == "::1"
|
||||
):
|
||||
# On some systems (most notably docker with default
|
||||
# configurations), ipv6 is partially disabled:
|
||||
# socket.has_ipv6 is true, we can create AF_INET6
|
||||
# sockets, and getaddrinfo("localhost", ...,
|
||||
# AF_PASSIVE) resolves to ::1, but we get an error
|
||||
# when binding.
|
||||
#
|
||||
# Swallow the error, but only for this specific case.
|
||||
# If EADDRNOTAVAIL occurs in other situations, it
|
||||
# might be a real problem like a typo in a
|
||||
# configuration.
|
||||
sock.close()
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
bound_port = sock.getsockname()[1]
|
||||
sock.listen(backlog)
|
||||
sockets.append(sock)
|
||||
return sockets
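# Illustrative sketch (the port is an example): on a dual-stack host this
# typically yields one IPv4 and one IPv6 listening socket:
#
#     socks = bind_sockets(8888)
#     # e.g. hand them to a TCPServer/HTTPServer via server.add_sockets(socks)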
|
||||
|
||||
|
||||
if hasattr(socket, "AF_UNIX"):
|
||||
|
||||
def bind_unix_socket(
|
||||
file: str, mode: int = 0o600, backlog: int = _DEFAULT_BACKLOG
|
||||
) -> socket.socket:
|
||||
"""Creates a listening unix socket.
|
||||
|
||||
If a socket with the given name already exists, it will be deleted.
|
||||
If any other file with that name exists, an exception will be
|
||||
raised.
|
||||
|
||||
Returns a socket object (not a list of socket objects like
|
||||
`bind_sockets`)
|
||||
"""
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
try:
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
except socket.error as e:
|
||||
if errno_from_exception(e) != errno.ENOPROTOOPT:
|
||||
# Hurd doesn't support SO_REUSEADDR
|
||||
raise
|
||||
sock.setblocking(False)
|
||||
try:
|
||||
st = os.stat(file)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
else:
|
||||
if stat.S_ISSOCK(st.st_mode):
|
||||
os.remove(file)
|
||||
else:
|
||||
raise ValueError("File %s exists and is not a socket", file)
|
||||
sock.bind(file)
|
||||
os.chmod(file, mode)
|
||||
sock.listen(backlog)
|
||||
return sock
|
||||
|
||||
|
||||
def add_accept_handler(
|
||||
sock: socket.socket, callback: Callable[[socket.socket, Any], None]
|
||||
) -> Callable[[], None]:
|
||||
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
|
||||
|
||||
When a connection is accepted, ``callback(connection, address)`` will
|
||||
be run (``connection`` is a socket object, and ``address`` is the
|
||||
address of the other end of the connection). Note that this signature
|
||||
is different from the ``callback(fd, events)`` signature used for
|
||||
`.IOLoop` handlers.
|
||||
|
||||
A callable is returned which, when called, will remove the `.IOLoop`
|
||||
event handler and stop processing further incoming connections.
|
||||
|
||||
.. versionchanged:: 5.0
|
||||
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
|
||||
|
||||
.. versionchanged:: 5.0
|
||||
A callable is returned (``None`` was returned before).
|
||||
"""
|
||||
io_loop = IOLoop.current()
|
||||
removed = [False]
|
||||
|
||||
def accept_handler(fd: socket.socket, events: int) -> None:
|
||||
# More connections may come in while we're handling callbacks;
|
||||
# to prevent starvation of other tasks we must limit the number
|
||||
# of connections we accept at a time. Ideally we would accept
|
||||
# up to the number of connections that were waiting when we
|
||||
# entered this method, but this information is not available
|
||||
# (and rearranging this method to call accept() as many times
|
||||
# as possible before running any callbacks would have adverse
|
||||
# effects on load balancing in multiprocess configurations).
|
||||
# Instead, we use the (default) listen backlog as a rough
|
||||
# heuristic for the number of connections we can reasonably
|
||||
# accept at once.
|
||||
for i in range(_DEFAULT_BACKLOG):
|
||||
if removed[0]:
|
||||
# The socket was probably closed
|
||||
return
|
||||
try:
|
||||
connection, address = sock.accept()
|
||||
except BlockingIOError:
|
||||
# EWOULDBLOCK indicates we have accepted every
|
||||
# connection that is available.
|
||||
return
|
||||
except ConnectionAbortedError:
|
||||
# ECONNABORTED indicates that there was a connection
|
||||
# but it was closed while still in the accept queue.
|
||||
# (observed on FreeBSD).
|
||||
continue
|
||||
callback(connection, address)
|
||||
|
||||
def remove_handler() -> None:
|
||||
io_loop.remove_handler(sock)
|
||||
removed[0] = True
|
||||
|
||||
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
|
||||
return remove_handler
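# Illustrative sketch (``on_connection`` is a hypothetical callback):
#
#     def on_connection(connection, address):
#         pass  # e.g. wrap `connection` in an IOStream
#
#     removers = [add_accept_handler(s, on_connection) for s in bind_sockets(0)]
#     # later, to stop accepting: for remove in removers: remove()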
|
||||
|
||||
|
||||
def is_valid_ip(ip: str) -> bool:
|
||||
"""Returns ``True`` if the given string is a well-formed IP address.
|
||||
|
||||
Supports IPv4 and IPv6.
|
||||
"""
|
||||
if not ip or "\x00" in ip:
|
||||
# getaddrinfo resolves empty strings to localhost, and truncates
|
||||
# on zero bytes.
|
||||
return False
|
||||
try:
|
||||
res = socket.getaddrinfo(
|
||||
ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST
|
||||
)
|
||||
return bool(res)
|
||||
except socket.gaierror as e:
|
||||
if e.args[0] == socket.EAI_NONAME:
|
||||
return False
|
||||
raise
|
||||
|
||||
|
||||
class Resolver(Configurable):
    """Configurable asynchronous DNS resolver interface.

    By default, a blocking implementation is used (which simply calls
    `socket.getaddrinfo`).  An alternative implementation can be
    chosen with the `Resolver.configure <.Configurable.configure>`
    class method::

        Resolver.configure('tornado.netutil.ThreadedResolver')

    The implementations of this interface included with Tornado are

    * `tornado.netutil.DefaultExecutorResolver`
    * `tornado.netutil.BlockingResolver` (deprecated)
    * `tornado.netutil.ThreadedResolver` (deprecated)
    * `tornado.netutil.OverrideResolver`
    * `tornado.platform.twisted.TwistedResolver`
    * `tornado.platform.caresresolver.CaresResolver`

    .. versionchanged:: 5.0
       The default implementation has changed from `BlockingResolver` to
       `DefaultExecutorResolver`.
    """

    @classmethod
    def configurable_base(cls) -> Type["Resolver"]:
        return Resolver

    @classmethod
    def configurable_default(cls) -> Type["Resolver"]:
        return DefaultExecutorResolver

    def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> Awaitable[List[Tuple[int, Any]]]:
        """Resolves an address.

        The ``host`` argument is a string which may be a hostname or a
        literal IP address.

        Returns a `.Future` whose result is a list of (family,
        address) pairs, where address is a tuple suitable to pass to
        `socket.connect <socket.socket.connect>` (i.e. a ``(host,
        port)`` pair for IPv4; additional fields may be present for
        IPv6).

        :raises IOError: if the address cannot be resolved.

        .. versionchanged:: 4.4
           Standardized all implementations to raise `IOError`.

        .. versionchanged:: 6.0 The ``callback`` argument was removed.
           Use the returned awaitable object instead.

        """
        raise NotImplementedError()

    def close(self) -> None:
        """Closes the `Resolver`, freeing any resources used.

        .. versionadded:: 3.1

        """
        pass

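# A minimal sketch of using a Resolver inside a coroutine (the host name
# is illustrative); the coroutine must run on an event loop, e.g. via
# IOLoop.current().run_sync(lookup):
#
#     from tornado.netutil import Resolver
#
#     async def lookup():
#         resolver = Resolver()  # whatever implementation is configured
#         addrinfo = await resolver.resolve("www.example.com", 80)
#         # e.g. [(<AddressFamily.AF_INET: 2>, ('93.184.216.34', 80)), ...]
#         return addrinfo
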
def _resolve_addr(
    host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
    # On Solaris, getaddrinfo fails if the given port is not found
    # in /etc/services and no socket type is given, so we must pass
    # one here.  The socket type used here doesn't seem to actually
    # matter (we discard the one we get back in the results),
    # so the addresses we return should still be usable with SOCK_DGRAM.
    addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
    results = []
    for fam, socktype, proto, canonname, address in addrinfo:
        results.append((fam, address))
    return results  # type: ignore

class DefaultExecutorResolver(Resolver):
    """Resolver implementation using `.IOLoop.run_in_executor`.

    .. versionadded:: 5.0
    """

    async def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> List[Tuple[int, Any]]:
        result = await IOLoop.current().run_in_executor(
            None, _resolve_addr, host, port, family
        )
        return result

class ExecutorResolver(Resolver):
    """Resolver implementation using a `concurrent.futures.Executor`.

    Use this instead of `ThreadedResolver` when you require additional
    control over the executor being used.

    The executor will be shut down when the resolver is closed unless
    ``close_executor=False``; use this if you want to reuse the same
    executor elsewhere.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

    .. deprecated:: 5.0
       The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
       of this class.
    """

    def initialize(
        self,
        executor: Optional[concurrent.futures.Executor] = None,
        close_executor: bool = True,
    ) -> None:
        self.io_loop = IOLoop.current()
        if executor is not None:
            self.executor = executor
            self.close_executor = close_executor
        else:
            self.executor = dummy_executor
            self.close_executor = False

    def close(self) -> None:
        if self.close_executor:
            self.executor.shutdown()
        self.executor = None  # type: ignore

    @run_on_executor
    def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> List[Tuple[int, Any]]:
        return _resolve_addr(host, port, family)

class BlockingResolver(ExecutorResolver):
    """Blocking `Resolver` implementation, using `socket.getaddrinfo`.

    The `.IOLoop` will be blocked during the resolution, although the
    callback will not be run until the next `.IOLoop` iteration.

    .. deprecated:: 5.0
       The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
       of this class.
    """

    def initialize(self) -> None:  # type: ignore
        super().initialize()

class ThreadedResolver(ExecutorResolver):
    """Multithreaded non-blocking `Resolver` implementation.

    Requires the `concurrent.futures` package (in the standard library
    since Python 3.2).

    The thread pool size can be configured with::

        Resolver.configure('tornado.netutil.ThreadedResolver',
                           num_threads=10)

    .. versionchanged:: 3.1
       All ``ThreadedResolvers`` share a single thread pool, whose
       size is set by the first one to be created.

    .. deprecated:: 5.0
       The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
       of this class.
    """

    _threadpool = None  # type: ignore
    _threadpool_pid = None  # type: int

    def initialize(self, num_threads: int = 10) -> None:  # type: ignore
        threadpool = ThreadedResolver._create_threadpool(num_threads)
        super().initialize(executor=threadpool, close_executor=False)

    @classmethod
    def _create_threadpool(
        cls, num_threads: int
    ) -> concurrent.futures.ThreadPoolExecutor:
        pid = os.getpid()
        if cls._threadpool_pid != pid:
            # Threads cannot survive after a fork, so if our pid isn't what it
            # was when we created the pool then delete it.
            cls._threadpool = None
        if cls._threadpool is None:
            cls._threadpool = concurrent.futures.ThreadPoolExecutor(num_threads)
            cls._threadpool_pid = pid
        return cls._threadpool

class OverrideResolver(Resolver):
    """Wraps a resolver with a mapping of overrides.

    This can be used to make local DNS changes (e.g. for testing)
    without modifying system-wide settings.

    The mapping can be in three formats::

        {
            # Hostname to host or ip
            "example.com": "127.0.1.1",

            # Host+port to host+port
            ("login.example.com", 443): ("localhost", 1443),

            # Host+port+address family to host+port
            ("login.example.com", 443, socket.AF_INET6): ("::1", 1443),
        }

    .. versionchanged:: 5.0
       Added support for host-port-family triplets.
    """

    def initialize(self, resolver: Resolver, mapping: dict) -> None:
        self.resolver = resolver
        self.mapping = mapping

    def close(self) -> None:
        self.resolver.close()

    def resolve(
        self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
    ) -> Awaitable[List[Tuple[int, Any]]]:
        if (host, port, family) in self.mapping:
            host, port = self.mapping[(host, port, family)]
        elif (host, port) in self.mapping:
            host, port = self.mapping[(host, port)]
        elif host in self.mapping:
            host = self.mapping[host]
        return self.resolver.resolve(host, port, family)

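# A short sketch of redirecting one hostname during tests (all names and
# ports illustrative); the more specific (host, port, family) override
# wins over (host, port), which wins over a plain host entry:
#
#     from tornado.netutil import DefaultExecutorResolver, OverrideResolver
#
#     resolver = OverrideResolver(
#         resolver=DefaultExecutorResolver(),
#         mapping={("api.example.com", 443): ("localhost", 8443)},
#     )
#     # resolver.resolve("api.example.com", 443) now resolves localhost:8443.
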
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
_SSL_CONTEXT_KEYWORDS = frozenset(
    ["ssl_version", "certfile", "keyfile", "cert_reqs", "ca_certs", "ciphers"]
)


def ssl_options_to_context(
    ssl_options: Union[Dict[str, Any], ssl.SSLContext]
) -> ssl.SSLContext:
    """Try to convert an ``ssl_options`` dictionary to an
    `~ssl.SSLContext` object.

    The ``ssl_options`` dictionary contains keywords to be passed to
    `ssl.wrap_socket`; `ssl.SSLContext` objects can be used instead.
    This function converts the dict form to its `~ssl.SSLContext`
    equivalent, and may be used when a component which accepts both
    forms needs to upgrade to the `~ssl.SSLContext` version to use
    features like SNI or NPN.
    """
    if isinstance(ssl_options, ssl.SSLContext):
        return ssl_options
    assert isinstance(ssl_options, dict)
    assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
    # Can't use create_default_context since this interface doesn't
    # tell us client vs server.
    context = ssl.SSLContext(ssl_options.get("ssl_version", ssl.PROTOCOL_SSLv23))
    if "certfile" in ssl_options:
        context.load_cert_chain(
            ssl_options["certfile"], ssl_options.get("keyfile", None)
        )
    if "cert_reqs" in ssl_options:
        context.verify_mode = ssl_options["cert_reqs"]
    if "ca_certs" in ssl_options:
        context.load_verify_locations(ssl_options["ca_certs"])
    if "ciphers" in ssl_options:
        context.set_ciphers(ssl_options["ciphers"])
    if hasattr(ssl, "OP_NO_COMPRESSION"):
        # Disable TLS compression to avoid CRIME and related attacks.
        # This constant depends on openssl version 1.0.
        # TODO: Do we need to do this ourselves or can we trust
        # the defaults?
        context.options |= ssl.OP_NO_COMPRESSION
    return context

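# A minimal conversion sketch (the file paths are hypothetical):
#
#     import ssl
#     from tornado.netutil import ssl_options_to_context
#
#     ctx = ssl_options_to_context({
#         "certfile": "/etc/ssl/server.crt",
#         "keyfile": "/etc/ssl/server.key",
#         "cert_reqs": ssl.CERT_NONE,
#     })
#     assert isinstance(ctx, ssl.SSLContext)
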
def ssl_wrap_socket(
    socket: socket.socket,
    ssl_options: Union[Dict[str, Any], ssl.SSLContext],
    server_hostname: Optional[str] = None,
    **kwargs: Any
) -> ssl.SSLSocket:
    """Returns an ``ssl.SSLSocket`` wrapping the given socket.

    ``ssl_options`` may be either an `ssl.SSLContext` object or a
    dictionary (as accepted by `ssl_options_to_context`).  Additional
    keyword arguments are passed to ``wrap_socket`` (either the
    `~ssl.SSLContext` method or the `ssl` module function as
    appropriate).
    """
    context = ssl_options_to_context(ssl_options)
    if ssl.HAS_SNI:
        # In python 3.4, wrap_socket only accepts the server_hostname
        # argument if HAS_SNI is true.
        # TODO: add a unittest (python added server-side SNI support in 3.4)
        # In the meantime it can be manually tested with
        # python3 -m tornado.httpclient https://sni.velox.ch
        return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs)
    else:
        return context.wrap_socket(socket, **kwargs)
735
venv/Lib/site-packages/tornado/options.py
Normal file
@@ -0,0 +1,735 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A command line parsing module that lets modules define their own options.

This module is inspired by Google's `gflags
<https://github.com/google/python-gflags>`_. The primary difference
with libraries such as `argparse` is that a global registry is used so
that options may be defined in any module (it also enables
`tornado.log` by default). The rest of Tornado does not depend on this
module, so feel free to use `argparse` or other configuration
libraries if you prefer them.

Options must be defined with `tornado.options.define` before use,
generally at the top level of a module. The options are then
accessible as attributes of `tornado.options.options`::

    # myapp/db.py
    from tornado.options import define, options

    define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
    define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
           help="Main user memcache servers")

    def connect():
        db = database.Connection(options.mysql_host)
        ...

    # myapp/server.py
    from tornado.options import define, options

    define("port", default=8080, help="port to listen on")

    def start_server():
        app = make_app()
        app.listen(options.port)

The ``main()`` method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded.  However, all modules that define options
must have been imported before the command line is parsed.

Your ``main()`` method can parse the command line or parse a config file with
either `parse_command_line` or `parse_config_file`::

    import myapp.db, myapp.server
    import tornado.options

    if __name__ == '__main__':
        tornado.options.parse_command_line()
        # or
        tornado.options.parse_config_file("/etc/server.conf")

.. note::

   When using multiple ``parse_*`` functions, pass ``final=False`` to all
   but the last one, or side effects may occur twice (in particular,
   this can result in log messages being doubled).

`tornado.options.options` is a singleton instance of `OptionParser`, and
the top-level functions in this module (`define`, `parse_command_line`, etc)
simply call methods on it.  You may create additional `OptionParser`
instances to define isolated sets of options, such as for subcommands.

.. note::

   By default, several options are defined that will configure the
   standard `logging` module when `parse_command_line` or `parse_config_file`
   are called.  If you want Tornado to leave the logging configuration
   alone so you can manage it yourself, either pass ``--logging=none``
   on the command line or do the following to disable it in code::

       from tornado.options import options, parse_command_line
       options.logging = None
       parse_command_line()

.. versionchanged:: 4.3
   Dashes and underscores are fully interchangeable in option names;
   options can be defined, set, and read with any mix of the two.
   Dashes are typical for command-line usage while config files require
   underscores.
"""

import datetime
import numbers
import re
import sys
import os
import textwrap

from tornado.escape import _unicode, native_str
from tornado.log import define_logging_options
from tornado.util import basestring_type, exec_in

from typing import (
    Any,
    Iterator,
    Iterable,
    Tuple,
    Set,
    Dict,
    Callable,
    List,
    TextIO,
    Optional,
)


class Error(Exception):
    """Exception raised by errors in the options module."""

    pass

class OptionParser(object):
    """A collection of options, a dictionary with object-like access.

    Normally accessed via static functions in the `tornado.options` module,
    which reference a global instance.
    """

    def __init__(self) -> None:
        # we have to use self.__dict__ because we override setattr.
        self.__dict__["_options"] = {}
        self.__dict__["_parse_callbacks"] = []
        self.define(
            "help",
            type=bool,
            help="show this help information",
            callback=self._help_callback,
        )

    def _normalize_name(self, name: str) -> str:
        return name.replace("_", "-")

    def __getattr__(self, name: str) -> Any:
        name = self._normalize_name(name)
        if isinstance(self._options.get(name), _Option):
            return self._options[name].value()
        raise AttributeError("Unrecognized option %r" % name)

    def __setattr__(self, name: str, value: Any) -> None:
        name = self._normalize_name(name)
        if isinstance(self._options.get(name), _Option):
            return self._options[name].set(value)
        raise AttributeError("Unrecognized option %r" % name)

    def __iter__(self) -> Iterator:
        return (opt.name for opt in self._options.values())

    def __contains__(self, name: str) -> bool:
        name = self._normalize_name(name)
        return name in self._options

    def __getitem__(self, name: str) -> Any:
        return self.__getattr__(name)

    def __setitem__(self, name: str, value: Any) -> None:
        return self.__setattr__(name, value)

    def items(self) -> Iterable[Tuple[str, Any]]:
        """An iterable of (name, value) pairs.

        .. versionadded:: 3.1
        """
        return [(opt.name, opt.value()) for name, opt in self._options.items()]

    def groups(self) -> Set[str]:
        """The set of option-groups created by ``define``.

        .. versionadded:: 3.1
        """
        return set(opt.group_name for opt in self._options.values())

    def group_dict(self, group: str) -> Dict[str, Any]:
        """The names and values of options in a group.

        Useful for copying options into Application settings::

            from tornado.options import define, parse_command_line, options

            define('template_path', group='application')
            define('static_path', group='application')

            parse_command_line()

            application = Application(
                handlers, **options.group_dict('application'))

        .. versionadded:: 3.1
        """
        return dict(
            (opt.name, opt.value())
            for name, opt in self._options.items()
            if not group or group == opt.group_name
        )

    def as_dict(self) -> Dict[str, Any]:
        """The names and values of all options.

        .. versionadded:: 3.1
        """
        return dict((opt.name, opt.value()) for name, opt in self._options.items())

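    # Options are readable and writable both as attributes and as
    # dict-style items, interchangeably; a minimal sketch (the option
    # name is illustrative):
    #
    #     options.define("port", default=8080)
    #     assert options["port"] == options.port == 8080
    #     options.port = 9000  # equivalent to options["port"] = 9000
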
    def define(
        self,
        name: str,
        default: Any = None,
        type: Optional[type] = None,
        help: Optional[str] = None,
        metavar: Optional[str] = None,
        multiple: bool = False,
        group: Optional[str] = None,
        callback: Optional[Callable[[Any], None]] = None,
    ) -> None:
        """Defines a new command line option.

        ``type`` can be any of `str`, `int`, `float`, `bool`,
        `~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
        is given but a ``default`` is, ``type`` is the type of
        ``default``. Otherwise, ``type`` defaults to `str`.

        If ``multiple`` is True, the option value is a list of ``type``
        instead of an instance of ``type``.

        ``help`` and ``metavar`` are used to construct the
        automatically generated command line help string. The help
        message is formatted like::

           --name=METAVAR      help string

        ``group`` is used to group the defined options in logical
        groups. By default, command line options are grouped by the
        file in which they are defined.

        Command line option names must be unique globally.

        If a ``callback`` is given, it will be run with the new value whenever
        the option is changed.  This can be used to combine command-line
        and file-based options::

            define("config", type=str, help="path to config file",
                   callback=lambda path: parse_config_file(path, final=False))

        With this definition, options in the file specified by ``--config`` will
        override options set earlier on the command line, but can be overridden
        by later flags.

        """
        normalized = self._normalize_name(name)
        if normalized in self._options:
            raise Error(
                "Option %r already defined in %s"
                % (normalized, self._options[normalized].file_name)
            )
        frame = sys._getframe(0)
        options_file = frame.f_code.co_filename

        # Can be called directly, or through top level define() fn, in which
        # case, step up above that frame to look for real caller.
        if (
            frame.f_back.f_code.co_filename == options_file
            and frame.f_back.f_code.co_name == "define"
        ):
            frame = frame.f_back

        file_name = frame.f_back.f_code.co_filename
        if file_name == options_file:
            file_name = ""
        if type is None:
            if not multiple and default is not None:
                type = default.__class__
            else:
                type = str
        if group:
            group_name = group  # type: Optional[str]
        else:
            group_name = file_name
        option = _Option(
            name,
            file_name=file_name,
            default=default,
            type=type,
            help=help,
            metavar=metavar,
            multiple=multiple,
            group_name=group_name,
            callback=callback,
        )
        self._options[normalized] = option

    def parse_command_line(
        self, args: Optional[List[str]] = None, final: bool = True
    ) -> List[str]:
        """Parses all options given on the command line (defaults to
        `sys.argv`).

        Options look like ``--option=value`` and are parsed according
        to their ``type``. For boolean options, ``--option`` is
        equivalent to ``--option=true``.

        If the option has ``multiple=True``, comma-separated values
        are accepted. For multi-value integer options, the syntax
        ``x:y`` is also accepted and is equivalent to ``range(x, y + 1)``
        (both endpoints included).

        Note that ``args[0]`` is ignored since it is the program name
        in `sys.argv`.

        We return a list of all arguments that are not parsed as options.

        If ``final`` is ``False``, parse callbacks will not be run.
        This is useful for applications that wish to combine configurations
        from multiple sources.

        """
        if args is None:
            args = sys.argv
        remaining = []  # type: List[str]
        for i in range(1, len(args)):
            # All things after the last option are command line arguments
            if not args[i].startswith("-"):
                remaining = args[i:]
                break
            if args[i] == "--":
                remaining = args[i + 1 :]
                break
            arg = args[i].lstrip("-")
            name, equals, value = arg.partition("=")
            name = self._normalize_name(name)
            if name not in self._options:
                self.print_help()
                raise Error("Unrecognized command line option: %r" % name)
            option = self._options[name]
            if not equals:
                if option.type == bool:
                    value = "true"
                else:
                    raise Error("Option %r requires a value" % name)
            option.parse(value)

        if final:
            self.run_parse_callbacks()

        return remaining

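    # A parsing sketch (the argument list and option names are
    # illustrative):
    #
    #     parser = OptionParser()
    #     parser.define("port", default=8080)
    #     parser.define("servers", type=int, multiple=True)
    #     rest = parser.parse_command_line(
    #         ["prog", "--port=9000", "--servers=1,3:5", "extra.txt"]
    #     )
    #     # parser.port == 9000; parser.servers == [1, 3, 4, 5]
    #     # rest == ["extra.txt"]
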
    def parse_config_file(self, path: str, final: bool = True) -> None:
        """Parses and loads the config file at the given path.

        The config file contains Python code that will be executed (so
        it is **not safe** to use untrusted config files).  Anything in
        the global namespace that matches a defined option will be
        used to set that option's value.

        Options may either be the specified type for the option or
        strings (in which case they will be parsed the same way as in
        `.parse_command_line`).

        Example (using the options defined in the top-level docs of
        this module)::

            port = 80
            mysql_host = 'mydb.example.com:3306'
            # Both lists and comma-separated strings are allowed for
            # multiple=True.
            memcache_hosts = ['cache1.example.com:11011',
                              'cache2.example.com:11011']
            memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011'

        If ``final`` is ``False``, parse callbacks will not be run.
        This is useful for applications that wish to combine configurations
        from multiple sources.

        .. note::

            `tornado.options` is primarily a command-line library.
            Config file support is provided for applications that wish
            to use it, but applications that prefer config files may
            wish to look at other libraries instead.

        .. versionchanged:: 4.1
           Config files are now always interpreted as utf-8 instead of
           the system default encoding.

        .. versionchanged:: 4.4
           The special variable ``__file__`` is available inside config
           files, specifying the absolute path to the config file itself.

        .. versionchanged:: 5.1
           Added the ability to set options via strings in config files.

        """
        config = {"__file__": os.path.abspath(path)}
        with open(path, "rb") as f:
            exec_in(native_str(f.read()), config, config)
        for name in config:
            normalized = self._normalize_name(name)
            if normalized in self._options:
                option = self._options[normalized]
                if option.multiple:
                    if not isinstance(config[name], (list, str)):
                        raise Error(
                            "Option %r is required to be a list of %s "
                            "or a comma-separated string"
                            % (option.name, option.type.__name__)
                        )

                if type(config[name]) == str and option.type != str:
                    option.parse(config[name])
                else:
                    option.set(config[name])

        if final:
            self.run_parse_callbacks()

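    # When combining a config file with the command line, parse the file
    # first with ``final=False`` so parse callbacks (e.g. logging setup)
    # run only once, after the last source is parsed:
    #
    #     options.parse_config_file("/etc/server.conf", final=False)
    #     options.parse_command_line()
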
    def print_help(self, file: Optional[TextIO] = None) -> None:
        """Prints all the command line options to stderr (or another file)."""
        if file is None:
            file = sys.stderr
        print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
        print("\nOptions:\n", file=file)
        by_group = {}  # type: Dict[str, List[_Option]]
        for option in self._options.values():
            by_group.setdefault(option.group_name, []).append(option)

        for filename, o in sorted(by_group.items()):
            if filename:
                print("\n%s options:\n" % os.path.normpath(filename), file=file)
            o.sort(key=lambda option: option.name)
            for option in o:
                # Always print names with dashes in a CLI context.
                prefix = self._normalize_name(option.name)
                if option.metavar:
                    prefix += "=" + option.metavar
                description = option.help or ""
                if option.default is not None and option.default != "":
                    description += " (default %s)" % option.default
                lines = textwrap.wrap(description, 79 - 35)
                if len(prefix) > 30 or len(lines) == 0:
                    lines.insert(0, "")
                print("  --%-30s %s" % (prefix, lines[0]), file=file)
                for line in lines[1:]:
                    print("%-34s %s" % (" ", line), file=file)
        print(file=file)

    def _help_callback(self, value: bool) -> None:
        if value:
            self.print_help()
            sys.exit(0)

    def add_parse_callback(self, callback: Callable[[], None]) -> None:
        """Adds a parse callback, to be invoked when option parsing is done."""
        self._parse_callbacks.append(callback)

    def run_parse_callbacks(self) -> None:
        for callback in self._parse_callbacks:
            callback()

    def mockable(self) -> "_Mockable":
        """Returns a wrapper around self that is compatible with
        `mock.patch <unittest.mock.patch>`.

        The `mock.patch <unittest.mock.patch>` function (included in
        the standard library `unittest.mock` package since Python 3.3,
        or in the third-party ``mock`` package for older versions of
        Python) is incompatible with objects like ``options`` that
        override ``__getattr__`` and ``__setattr__``.  This function
        returns an object that can be used with `mock.patch.object
        <unittest.mock.patch.object>` to modify option values::

            with mock.patch.object(options.mockable(), 'name', value):
                assert options.name == value
        """
        return _Mockable(self)

class _Mockable(object):
    """`mock.patch` compatible wrapper for `OptionParser`.

    As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
    hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
    the attribute it set instead of setting a new one (assuming that
    the object does not capture ``__setattr__``, so the patch
    created a new attribute in ``__dict__``).

    _Mockable's getattr and setattr pass through to the underlying
    OptionParser, and delattr undoes the effect of a previous setattr.
    """

    def __init__(self, options: OptionParser) -> None:
        # Modify __dict__ directly to bypass __setattr__
        self.__dict__["_options"] = options
        self.__dict__["_originals"] = {}

    def __getattr__(self, name: str) -> Any:
        return getattr(self._options, name)

    def __setattr__(self, name: str, value: Any) -> None:
        assert name not in self._originals, "don't reuse mockable objects"
        self._originals[name] = getattr(self._options, name)
        setattr(self._options, name, value)

    def __delattr__(self, name: str) -> None:
        setattr(self._options, name, self._originals.pop(name))

class _Option(object):
    # This class could almost be made generic, but the way the types
    # interact with the multiple argument makes this tricky.  (default
    # and the callback use List[T], but type is still Type[T]).
    UNSET = object()

    def __init__(
        self,
        name: str,
        default: Any = None,
        type: Optional[type] = None,
        help: Optional[str] = None,
        metavar: Optional[str] = None,
        multiple: bool = False,
        file_name: Optional[str] = None,
        group_name: Optional[str] = None,
        callback: Optional[Callable[[Any], None]] = None,
    ) -> None:
        if default is None and multiple:
            default = []
        self.name = name
        if type is None:
            raise ValueError("type must not be None")
        self.type = type
        self.help = help
        self.metavar = metavar
        self.multiple = multiple
        self.file_name = file_name
        self.group_name = group_name
        self.callback = callback
        self.default = default
        self._value = _Option.UNSET  # type: Any

    def value(self) -> Any:
        return self.default if self._value is _Option.UNSET else self._value

    def parse(self, value: str) -> Any:
        _parse = {
            datetime.datetime: self._parse_datetime,
            datetime.timedelta: self._parse_timedelta,
            bool: self._parse_bool,
            basestring_type: self._parse_string,
        }.get(
            self.type, self.type
        )  # type: Callable[[str], Any]
        if self.multiple:
            self._value = []
            for part in value.split(","):
                if issubclass(self.type, numbers.Integral):
                    # allow ranges of the form X:Y (inclusive at both ends)
                    lo_str, _, hi_str = part.partition(":")
                    lo = _parse(lo_str)
                    hi = _parse(hi_str) if hi_str else lo
                    self._value.extend(range(lo, hi + 1))
                else:
                    self._value.append(_parse(part))
        else:
            self._value = _parse(value)
        if self.callback is not None:
            self.callback(self._value)
        return self.value()

    def set(self, value: Any) -> None:
        if self.multiple:
            if not isinstance(value, list):
                raise Error(
                    "Option %r is required to be a list of %s"
                    % (self.name, self.type.__name__)
                )
            for item in value:
                if item is not None and not isinstance(item, self.type):
                    raise Error(
                        "Option %r is required to be a list of %s"
                        % (self.name, self.type.__name__)
                    )
        else:
            if value is not None and not isinstance(value, self.type):
                raise Error(
                    "Option %r is required to be a %s (%s given)"
                    % (self.name, self.type.__name__, type(value))
                )
        self._value = value
        if self.callback is not None:
            self.callback(self._value)

    # Supported date/time formats in our options
    _DATETIME_FORMATS = [
        "%a %b %d %H:%M:%S %Y",
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%d %H:%M",
        "%Y-%m-%dT%H:%M",
        "%Y%m%d %H:%M:%S",
        "%Y%m%d %H:%M",
        "%Y-%m-%d",
        "%Y%m%d",
        "%H:%M:%S",
        "%H:%M",
    ]

    def _parse_datetime(self, value: str) -> datetime.datetime:
        for format in self._DATETIME_FORMATS:
            try:
                return datetime.datetime.strptime(value, format)
            except ValueError:
                pass
        raise Error("Unrecognized date/time format: %r" % value)

    _TIMEDELTA_ABBREV_DICT = {
        "h": "hours",
        "m": "minutes",
        "min": "minutes",
        "s": "seconds",
        "sec": "seconds",
        "ms": "milliseconds",
        "us": "microseconds",
        "d": "days",
        "w": "weeks",
    }

    _FLOAT_PATTERN = r"[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"

    _TIMEDELTA_PATTERN = re.compile(
        r"\s*(%s)\s*(\w*)\s*" % _FLOAT_PATTERN, re.IGNORECASE
    )

    def _parse_timedelta(self, value: str) -> datetime.timedelta:
        try:
            sum = datetime.timedelta()
            start = 0
            while start < len(value):
                m = self._TIMEDELTA_PATTERN.match(value, start)
                if not m:
                    raise Exception()
                num = float(m.group(1))
                units = m.group(2) or "seconds"
                units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
                sum += datetime.timedelta(**{units: num})
                start = m.end()
            return sum
        except Exception:
            raise

    def _parse_bool(self, value: str) -> bool:
        return value.lower() not in ("false", "0", "f")

    def _parse_string(self, value: str) -> str:
        return _unicode(value)

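# Examples of strings accepted by the parsers above (values illustrative):
#
#   _parse_datetime:  "2021-03-01 12:30", "20210301", "12:30:05"
#   _parse_timedelta: "45s", "1.5h", "10 min", "2d", "90" (bare numbers
#                     are seconds), and concatenations such as "1h 30m"
#   _parse_bool:      everything except "false", "0", and "f" (any case)
#                     is True
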
options = OptionParser()
"""Global options object.

All defined options are available as attributes on this object.
"""


def define(
    name: str,
    default: Any = None,
    type: Optional[type] = None,
    help: Optional[str] = None,
    metavar: Optional[str] = None,
    multiple: bool = False,
    group: Optional[str] = None,
    callback: Optional[Callable[[Any], None]] = None,
) -> None:
    """Defines an option in the global namespace.

    See `OptionParser.define`.
    """
    return options.define(
        name,
        default=default,
        type=type,
        help=help,
        metavar=metavar,
        multiple=multiple,
        group=group,
        callback=callback,
    )


def parse_command_line(
    args: Optional[List[str]] = None, final: bool = True
) -> List[str]:
    """Parses global options from the command line.

    See `OptionParser.parse_command_line`.
    """
    return options.parse_command_line(args, final=final)


def parse_config_file(path: str, final: bool = True) -> None:
    """Parses global options from a config file.

    See `OptionParser.parse_config_file`.
    """
    return options.parse_config_file(path, final=final)


def print_help(file: Optional[TextIO] = None) -> None:
    """Prints all the command line options to stderr (or another file).

    See `OptionParser.print_help`.
    """
    return options.print_help(file)


def add_parse_callback(callback: Callable[[], None]) -> None:
    """Adds a parse callback, to be invoked when option parsing is done.

    See `OptionParser.add_parse_callback`.
    """
    options.add_parse_callback(callback)


# Default options
define_logging_options(options)
0
venv/Lib/site-packages/tornado/platform/__init__.py
Normal file
Binary file not shown.
Binary file not shown.
611
venv/Lib/site-packages/tornado/platform/asyncio.py
Normal file
@@ -0,0 +1,611 @@
"""Bridges between the `asyncio` module and Tornado IOLoop.
|
||||
|
||||
.. versionadded:: 3.2
|
||||
|
||||
This module integrates Tornado with the ``asyncio`` module introduced
|
||||
in Python 3.4. This makes it possible to combine the two libraries on
|
||||
the same event loop.
|
||||
|
||||
.. deprecated:: 5.0
|
||||
|
||||
While the code in this module is still used, it is now enabled
|
||||
automatically when `asyncio` is available, so applications should
|
||||
no longer need to refer to this module directly.
|
||||
|
||||
.. note::
|
||||
|
||||
Tornado is designed to use a selector-based event loop. On Windows,
|
||||
where a proactor-based event loop has been the default since Python 3.8,
|
||||
a selector event loop is emulated by running ``select`` on a separate thread.
|
||||
Configuring ``asyncio`` to use a selector event loop may improve performance
|
||||
of Tornado (but may reduce performance of other ``asyncio``-based libraries
|
||||
in the same process).
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import atexit
|
||||
import concurrent.futures
|
||||
import errno
|
||||
import functools
|
||||
import select
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import typing
|
||||
from tornado.gen import convert_yielded
|
||||
from tornado.ioloop import IOLoop, _Selectable
|
||||
|
||||
from typing import Any, TypeVar, Awaitable, Callable, Union, Optional, List, Tuple, Dict
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from typing import Set # noqa: F401
|
||||
from typing_extensions import Protocol
|
||||
|
||||
class _HasFileno(Protocol):
|
||||
def fileno(self) -> int:
|
||||
pass
|
||||
|
||||
_FileDescriptorLike = Union[int, _HasFileno]
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
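# Per the note in the module docstring, one way to opt in to a selector
# event loop on Windows (Python 3.8+), before any loop is created (a
# sketch, not part of this module):
#
#     import asyncio, sys
#     if sys.platform == "win32":
#         asyncio.set_event_loop_policy(
#             asyncio.WindowsSelectorEventLoopPolicy()
#         )
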
# Collection of selector thread event loops to shut down on exit.
_selector_loops = set()  # type: Set[AddThreadSelectorEventLoop]


def _atexit_callback() -> None:
    for loop in _selector_loops:
        with loop._select_cond:
            loop._closing_selector = True
            loop._select_cond.notify()
        try:
            loop._waker_w.send(b"a")
        except BlockingIOError:
            pass
        # If we don't join our (daemon) thread here, we may get a deadlock
        # during interpreter shutdown. I don't really understand why. This
        # deadlock happens every time in CI (both travis and appveyor) but
        # I've never been able to reproduce locally.
        loop._thread.join()
    _selector_loops.clear()


atexit.register(_atexit_callback)

class BaseAsyncIOLoop(IOLoop):
    def initialize(  # type: ignore
        self, asyncio_loop: asyncio.AbstractEventLoop, **kwargs: Any
    ) -> None:
        # asyncio_loop is always the real underlying IOLoop. This is used in
        # ioloop.py to maintain the asyncio-to-ioloop mappings.
        self.asyncio_loop = asyncio_loop
        # selector_loop is an event loop that implements the add_reader family of
        # methods. Usually the same as asyncio_loop but differs on platforms such
        # as windows where the default event loop does not implement these methods.
        self.selector_loop = asyncio_loop
        if hasattr(asyncio, "ProactorEventLoop") and isinstance(
            asyncio_loop, asyncio.ProactorEventLoop  # type: ignore
        ):
            # Ignore this line for mypy because the abstract method checker
            # doesn't understand dynamic proxies.
            self.selector_loop = AddThreadSelectorEventLoop(asyncio_loop)  # type: ignore
        # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
        self.handlers = {}  # type: Dict[int, Tuple[Union[int, _Selectable], Callable]]
        # Set of fds listening for reads/writes
        self.readers = set()  # type: Set[int]
        self.writers = set()  # type: Set[int]
        self.closing = False
        # If an asyncio loop was closed through an asyncio interface
        # instead of IOLoop.close(), we'd never hear about it and may
        # have left a dangling reference in our map. In case an
        # application (or, more likely, a test suite) creates and
        # destroys a lot of event loops in this way, check here to
        # ensure that we don't have a lot of dead loops building up in
        # the map.
        #
        # TODO(bdarnell): consider making self.asyncio_loop a weakref
        # for AsyncIOMainLoop and make _ioloop_for_asyncio a
        # WeakKeyDictionary.
        for loop in list(IOLoop._ioloop_for_asyncio):
            if loop.is_closed():
                del IOLoop._ioloop_for_asyncio[loop]
        IOLoop._ioloop_for_asyncio[asyncio_loop] = self

        self._thread_identity = 0

        super().initialize(**kwargs)

        def assign_thread_identity() -> None:
            self._thread_identity = threading.get_ident()

        self.add_callback(assign_thread_identity)

    def close(self, all_fds: bool = False) -> None:
        self.closing = True
        for fd in list(self.handlers):
            fileobj, handler_func = self.handlers[fd]
            self.remove_handler(fd)
            if all_fds:
                self.close_fd(fileobj)
        # Remove the mapping before closing the asyncio loop. If this
        # happened in the other order, we could race against another
        # initialize() call which would see the closed asyncio loop,
        # assume it was closed from the asyncio side, and do this
        # cleanup for us, leading to a KeyError.
        del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
        if self.selector_loop is not self.asyncio_loop:
            self.selector_loop.close()
        self.asyncio_loop.close()

    def add_handler(
        self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
    ) -> None:
        fd, fileobj = self.split_fd(fd)
        if fd in self.handlers:
            raise ValueError("fd %s added twice" % fd)
        self.handlers[fd] = (fileobj, handler)
        if events & IOLoop.READ:
            self.selector_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
            self.readers.add(fd)
        if events & IOLoop.WRITE:
            self.selector_loop.add_writer(fd, self._handle_events, fd, IOLoop.WRITE)
            self.writers.add(fd)

    def update_handler(self, fd: Union[int, _Selectable], events: int) -> None:
        fd, fileobj = self.split_fd(fd)
        if events & IOLoop.READ:
            if fd not in self.readers:
                self.selector_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
                self.readers.add(fd)
        else:
            if fd in self.readers:
                self.selector_loop.remove_reader(fd)
                self.readers.remove(fd)
        if events & IOLoop.WRITE:
            if fd not in self.writers:
                self.selector_loop.add_writer(fd, self._handle_events, fd, IOLoop.WRITE)
                self.writers.add(fd)
        else:
            if fd in self.writers:
                self.selector_loop.remove_writer(fd)
                self.writers.remove(fd)

    def remove_handler(self, fd: Union[int, _Selectable]) -> None:
        fd, fileobj = self.split_fd(fd)
        if fd not in self.handlers:
            return
        if fd in self.readers:
            self.selector_loop.remove_reader(fd)
            self.readers.remove(fd)
        if fd in self.writers:
            self.selector_loop.remove_writer(fd)
            self.writers.remove(fd)
        del self.handlers[fd]

    def _handle_events(self, fd: int, events: int) -> None:
        fileobj, handler_func = self.handlers[fd]
        handler_func(fileobj, events)

    def start(self) -> None:
        try:
            old_loop = asyncio.get_event_loop()
        except (RuntimeError, AssertionError):
            old_loop = None  # type: ignore
        try:
            self._setup_logging()
            asyncio.set_event_loop(self.asyncio_loop)
            self.asyncio_loop.run_forever()
        finally:
            asyncio.set_event_loop(old_loop)

    def stop(self) -> None:
        self.asyncio_loop.stop()

    def call_at(
        self, when: float, callback: Callable[..., None], *args: Any, **kwargs: Any
    ) -> object:
        # asyncio.call_at supports *args but not **kwargs, so bind them here.
        # We do not synchronize self.time and asyncio_loop.time, so
        # convert from absolute to relative.
        return self.asyncio_loop.call_later(
            max(0, when - self.time()),
            self._run_callback,
            functools.partial(callback, *args, **kwargs),
        )

    def remove_timeout(self, timeout: object) -> None:
        timeout.cancel()  # type: ignore

    def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
        if threading.get_ident() == self._thread_identity:
            call_soon = self.asyncio_loop.call_soon
        else:
            call_soon = self.asyncio_loop.call_soon_threadsafe
        try:
            call_soon(self._run_callback, functools.partial(callback, *args, **kwargs))
        except RuntimeError:
            # "Event loop is closed". Swallow the exception for
            # consistency with PollIOLoop (and logical consistency
            # with the fact that we can't guarantee that an
            # add_callback that completes without error will
            # eventually execute).
            pass
        except AttributeError:
            # ProactorEventLoop may raise this instead of RuntimeError
            # if call_soon_threadsafe races with a call to close().
            # Swallow it too for consistency.
            pass

    def add_callback_from_signal(
        self, callback: Callable, *args: Any, **kwargs: Any
    ) -> None:
        try:
            self.asyncio_loop.call_soon_threadsafe(
                self._run_callback, functools.partial(callback, *args, **kwargs)
            )
        except RuntimeError:
            pass

    def run_in_executor(
        self,
        executor: Optional[concurrent.futures.Executor],
        func: Callable[..., _T],
        *args: Any
    ) -> Awaitable[_T]:
        return self.asyncio_loop.run_in_executor(executor, func, *args)

    def set_default_executor(self, executor: concurrent.futures.Executor) -> None:
        return self.asyncio_loop.set_default_executor(executor)

class AsyncIOMainLoop(BaseAsyncIOLoop):
    """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
    current ``asyncio`` event loop (i.e. the one returned by
    ``asyncio.get_event_loop()``).

    .. deprecated:: 5.0

       Now used automatically when appropriate; it is no longer necessary
       to refer to this class directly.

    .. versionchanged:: 5.0

       Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.
    """

    def initialize(self, **kwargs: Any) -> None:  # type: ignore
        super().initialize(asyncio.get_event_loop(), **kwargs)

    def make_current(self) -> None:
        # AsyncIOMainLoop already refers to the current asyncio loop so
        # nothing to do here.
        pass

class AsyncIOLoop(BaseAsyncIOLoop):
    """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
    This class follows the usual Tornado semantics for creating new
    ``IOLoops``; these loops are not necessarily related to the
    ``asyncio`` default event loop.

    Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
    can be accessed with the ``asyncio_loop`` attribute.

    .. versionchanged:: 5.0

       When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets
       the current `asyncio` event loop.

    .. deprecated:: 5.0

       Now used automatically when appropriate; it is no longer necessary
       to refer to this class directly.
    """

    def initialize(self, **kwargs: Any) -> None:  # type: ignore
        self.is_current = False
        loop = asyncio.new_event_loop()
        try:
            super().initialize(loop, **kwargs)
        except Exception:
            # If initialize() does not succeed (taking ownership of the loop),
            # we have to close it.
            loop.close()
            raise

    def close(self, all_fds: bool = False) -> None:
        if self.is_current:
            self.clear_current()
        super().close(all_fds=all_fds)

    def make_current(self) -> None:
        if not self.is_current:
            try:
                self.old_asyncio = asyncio.get_event_loop()
            except (RuntimeError, AssertionError):
                self.old_asyncio = None  # type: ignore
            self.is_current = True
        asyncio.set_event_loop(self.asyncio_loop)

    def _clear_current_hook(self) -> None:
        if self.is_current:
            asyncio.set_event_loop(self.old_asyncio)
            self.is_current = False

def to_tornado_future(asyncio_future: asyncio.Future) -> asyncio.Future:
    """Convert an `asyncio.Future` to a `tornado.concurrent.Future`.

    .. versionadded:: 4.1

    .. deprecated:: 5.0
       Tornado ``Futures`` have been merged with `asyncio.Future`,
       so this method is now a no-op.
    """
    return asyncio_future


def to_asyncio_future(tornado_future: asyncio.Future) -> asyncio.Future:
    """Convert a Tornado yieldable object to an `asyncio.Future`.

    .. versionadded:: 4.1

    .. versionchanged:: 4.3
       Now accepts any yieldable object, not just
       `tornado.concurrent.Future`.

    .. deprecated:: 5.0
       Tornado ``Futures`` have been merged with `asyncio.Future`,
       so this method is now equivalent to `tornado.gen.convert_yielded`.
    """
    return convert_yielded(tornado_future)

if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
    # "Any thread" and "selector" should be orthogonal, but there's not a clean
    # interface for composing policies so pick the right base.
    _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy  # type: ignore
else:
    _BasePolicy = asyncio.DefaultEventLoopPolicy


class AnyThreadEventLoopPolicy(_BasePolicy):  # type: ignore
    """Event loop policy that allows loop creation on any thread.

    The default `asyncio` event loop policy only automatically creates
    event loops in the main thread. Other threads must create event
    loops explicitly or `asyncio.get_event_loop` (and therefore
    `.IOLoop.current`) will fail. Installing this policy allows event
    loops to be created automatically on any thread, matching the
    behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).

    Usage::

        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    .. versionadded:: 5.0

    """

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        try:
            return super().get_event_loop()
        except (RuntimeError, AssertionError):
            # This was an AssertionError in Python 3.4.2 (which ships with Debian Jessie)
            # and changed to a RuntimeError in 3.4.3.
            # "There is no current event loop in thread %r"
            loop = self.new_event_loop()
            self.set_event_loop(loop)
            return loop

class AddThreadSelectorEventLoop(asyncio.AbstractEventLoop):
|
||||
"""Wrap an event loop to add implementations of the ``add_reader`` method family.
|
||||
|
||||
Instances of this class start a second thread to run a selector.
|
||||
This thread is completely hidden from the user; all callbacks are
|
||||
run on the wrapped event loop's thread.
|
||||
|
||||
This class is used automatically by Tornado; applications should not need
|
||||
to refer to it directly.
|
||||
|
||||
It is safe to wrap any event loop with this class, although it only makes sense
|
||||
for event loops that do not implement the ``add_reader`` family of methods
|
||||
themselves (i.e. ``WindowsProactorEventLoop``)
|
||||
|
||||
Closing the ``AddThreadSelectorEventLoop`` also closes the wrapped event loop.
|
||||
|
||||
"""
|
||||
|
||||
# This class is a __getattribute__-based proxy. All attributes other than those
|
||||
# in this set are proxied through to the underlying loop.
|
||||
MY_ATTRIBUTES = {
|
||||
"_consume_waker",
|
||||
"_select_cond",
|
||||
"_select_args",
|
||||
"_closing_selector",
|
||||
"_thread",
|
||||
"_handle_event",
|
||||
"_readers",
|
||||
"_real_loop",
|
||||
"_start_select",
|
||||
"_run_select",
|
||||
"_handle_select",
        "_wake_selector",
        "_waker_r",
        "_waker_w",
        "_writers",
        "add_reader",
        "add_writer",
        "close",
        "remove_reader",
        "remove_writer",
    }

    def __getattribute__(self, name: str) -> Any:
        if name in AddThreadSelectorEventLoop.MY_ATTRIBUTES:
            return super().__getattribute__(name)
        return getattr(self._real_loop, name)

    def __init__(self, real_loop: asyncio.AbstractEventLoop) -> None:
        self._real_loop = real_loop

        # Create a thread to run the select system call. We manage this thread
        # manually so we can trigger a clean shutdown from an atexit hook. Note
        # that due to the order of operations at shutdown, only daemon threads
        # can be shut down in this way (non-daemon threads would require the
        # introduction of a new hook: https://bugs.python.org/issue41962)
        self._select_cond = threading.Condition()
        self._select_args = (
            None
        )  # type: Optional[Tuple[List[_FileDescriptorLike], List[_FileDescriptorLike]]]
        self._closing_selector = False
        self._thread = threading.Thread(
            name="Tornado selector", daemon=True, target=self._run_select,
        )
        self._thread.start()
        # Start the select loop once the loop is started.
        self._real_loop.call_soon(self._start_select)

        self._readers = {}  # type: Dict[_FileDescriptorLike, Callable]
        self._writers = {}  # type: Dict[_FileDescriptorLike, Callable]

        # Writing to _waker_w will wake up the selector thread, which
        # watches for _waker_r to be readable.
        self._waker_r, self._waker_w = socket.socketpair()
        self._waker_r.setblocking(False)
        self._waker_w.setblocking(False)
        _selector_loops.add(self)
        self.add_reader(self._waker_r, self._consume_waker)

    def __del__(self) -> None:
        # If the top-level application code uses asyncio interfaces to
        # start and stop the event loop, no objects created in Tornado
        # can get a clean shutdown notification. If we're just left to
        # be GC'd, we must explicitly close our sockets to avoid
        # logging warnings.
        _selector_loops.discard(self)
        self._waker_r.close()
        self._waker_w.close()

    def close(self) -> None:
        with self._select_cond:
            self._closing_selector = True
            self._select_cond.notify()
        self._wake_selector()
        self._thread.join()
        _selector_loops.discard(self)
        self._waker_r.close()
        self._waker_w.close()
        self._real_loop.close()

    def _wake_selector(self) -> None:
        try:
            self._waker_w.send(b"a")
        except BlockingIOError:
            pass

    def _consume_waker(self) -> None:
        try:
            self._waker_r.recv(1024)
        except BlockingIOError:
            pass

    def _start_select(self) -> None:
        # Capture reader and writer sets here in the event loop
        # thread to avoid any problems with concurrent
        # modification while the select loop uses them.
        with self._select_cond:
            assert self._select_args is None
            self._select_args = (list(self._readers.keys()), list(self._writers.keys()))
            self._select_cond.notify()

    def _run_select(self) -> None:
        while True:
            with self._select_cond:
                while self._select_args is None and not self._closing_selector:
                    self._select_cond.wait()
                if self._closing_selector:
                    return
                assert self._select_args is not None
                to_read, to_write = self._select_args
                self._select_args = None

            # We use the simpler interface of the select module instead of
            # the more stateful interface in the selectors module because
            # this class is only intended for use on windows, where
            # select.select is the only option. The selector interface
            # does not have well-documented thread-safety semantics that
            # we can rely on so ensuring proper synchronization would be
            # tricky.
            try:
                # On windows, selecting on a socket for write will not
                # return the socket when there is an error (but selecting
                # for reads works). Also select for errors when selecting
                # for writes, and merge the results.
                #
                # This pattern is also used in
                # https://github.com/python/cpython/blob/v3.8.0/Lib/selectors.py#L312-L317
                rs, ws, xs = select.select(to_read, to_write, to_write)
                ws = ws + xs
            except OSError as e:
                # After remove_reader or remove_writer is called, the file
                # descriptor may subsequently be closed on the event loop
                # thread. It's possible that this select thread hasn't
                # gotten into the select system call by the time that
                # happens in which case (at least on macOS), select may
                # raise a "bad file descriptor" error. If we get that
                # error, check and see if we're also being woken up by
                # polling the waker alone. If we are, just return to the
                # event loop and we'll get the updated set of file
                # descriptors on the next iteration. Otherwise, raise the
                # original error.
                if e.errno == getattr(errno, "WSAENOTSOCK", errno.EBADF):
                    rs, _, _ = select.select([self._waker_r.fileno()], [], [], 0)
                    if rs:
                        ws = []
                    else:
                        raise
                else:
                    raise
            self._real_loop.call_soon_threadsafe(self._handle_select, rs, ws)

    def _handle_select(
        self, rs: List["_FileDescriptorLike"], ws: List["_FileDescriptorLike"]
    ) -> None:
        for r in rs:
            self._handle_event(r, self._readers)
        for w in ws:
            self._handle_event(w, self._writers)
        self._start_select()

    def _handle_event(
        self, fd: "_FileDescriptorLike", cb_map: Dict["_FileDescriptorLike", Callable],
    ) -> None:
        try:
            callback = cb_map[fd]
        except KeyError:
            return
        callback()

    def add_reader(
        self, fd: "_FileDescriptorLike", callback: Callable[..., None], *args: Any
    ) -> None:
        self._readers[fd] = functools.partial(callback, *args)
        self._wake_selector()

    def add_writer(
        self, fd: "_FileDescriptorLike", callback: Callable[..., None], *args: Any
    ) -> None:
        self._writers[fd] = functools.partial(callback, *args)
        self._wake_selector()

    def remove_reader(self, fd: "_FileDescriptorLike") -> None:
        del self._readers[fd]
        self._wake_selector()

    def remove_writer(self, fd: "_FileDescriptorLike") -> None:
        del self._writers[fd]
        self._wake_selector()
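
For context, a minimal usage sketch of the wrapper above. This is not part of the file; it assumes tornado 6.1 and is aimed at Windows, the platform this class targets (the socketpair and the b"ping" payload are illustrative). Everything except the reader/writer methods is forwarded to the wrapped loop, so run_forever and stop below execute on the real loop while add_reader is serviced by the private select() thread.

    import asyncio
    import socket
    from tornado.platform.asyncio import AddThreadSelectorEventLoop

    # On Windows the default loop is a proactor loop without add_reader;
    # the wrapper supplies it. (On Unix this wrap is simply redundant.)
    selector_loop = AddThreadSelectorEventLoop(asyncio.new_event_loop())

    r, w = socket.socketpair()
    selector_loop.add_reader(r, lambda: print("readable:", r.recv(16)))
    selector_loop.call_soon(w.send, b"ping")      # delegated to the real loop
    selector_loop.call_later(0.1, selector_loop.stop)
    selector_loop.run_forever()
    selector_loop.close()  # joins the selector thread, closes the real loop
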
89
venv/Lib/site-packages/tornado/platform/caresresolver.py
Normal file
@@ -0,0 +1,89 @@
import pycares  # type: ignore
import socket

from tornado.concurrent import Future
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, is_valid_ip

import typing

if typing.TYPE_CHECKING:
    from typing import Generator, Any, List, Tuple, Dict  # noqa: F401


class CaresResolver(Resolver):
    """Name resolver based on the c-ares library.

    This is a non-blocking and non-threaded resolver. It may not produce
    the same results as the system resolver, but can be used for non-blocking
    resolution when threads cannot be used.

    c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
    so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
    the default for ``tornado.simple_httpclient``, but other libraries
    may default to ``AF_UNSPEC``.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """

    def initialize(self) -> None:
        self.io_loop = IOLoop.current()
        self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
        self.fds = {}  # type: Dict[int, int]

    def _sock_state_cb(self, fd: int, readable: bool, writable: bool) -> None:
        state = (IOLoop.READ if readable else 0) | (IOLoop.WRITE if writable else 0)
        if not state:
            self.io_loop.remove_handler(fd)
            del self.fds[fd]
        elif fd in self.fds:
            self.io_loop.update_handler(fd, state)
            self.fds[fd] = state
        else:
            self.io_loop.add_handler(fd, self._handle_events, state)
            self.fds[fd] = state

    def _handle_events(self, fd: int, events: int) -> None:
        read_fd = pycares.ARES_SOCKET_BAD
        write_fd = pycares.ARES_SOCKET_BAD
        if events & IOLoop.READ:
            read_fd = fd
        if events & IOLoop.WRITE:
            write_fd = fd
        self.channel.process_fd(read_fd, write_fd)

    @gen.coroutine
    def resolve(
        self, host: str, port: int, family: int = 0
    ) -> "Generator[Any, Any, List[Tuple[int, Any]]]":
        if is_valid_ip(host):
            addresses = [host]
        else:
            # gethostbyname doesn't take callback as a kwarg
            fut = Future()  # type: Future[Tuple[Any, Any]]
            self.channel.gethostbyname(
                host, family, lambda result, error: fut.set_result((result, error))
            )
            result, error = yield fut
            if error:
                raise IOError(
                    "C-Ares returned error %s: %s while resolving %s"
                    % (error, pycares.errno.strerror(error), host)
                )
            addresses = result.addresses
        addrinfo = []
        for address in addresses:
            if "." in address:
                address_family = socket.AF_INET
            elif ":" in address:
                address_family = socket.AF_INET6
            else:
                address_family = socket.AF_UNSPEC
            if family != socket.AF_UNSPEC and family != address_family:
                raise IOError(
                    "Requested socket family %d but got %d" % (family, address_family)
                )
            addrinfo.append((typing.cast(int, address_family), (address, port)))
        return addrinfo
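
A minimal usage sketch for the resolver above (not part of the file; it assumes pycares is installed, and the hostname, port, and printed address are illustrative). ``resolve`` yields ``(family, (address, port))`` pairs, mirroring ``tornado.netutil.Resolver``; ``AF_INET`` is the family recommended in the docstring.

    import socket
    from tornado.ioloop import IOLoop
    from tornado.platform.caresresolver import CaresResolver

    async def main():
        resolver = CaresResolver()
        addrinfo = await resolver.resolve("example.com", 80, socket.AF_INET)
        # e.g. [(<AddressFamily.AF_INET: 2>, ('93.184.216.34', 80))]
        print(addrinfo)

    IOLoop.current().run_sync(main)
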
146
venv/Lib/site-packages/tornado/platform/twisted.py
Normal file
@@ -0,0 +1,146 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bridges between the Twisted package and Tornado.
"""

import socket
import sys

import twisted.internet.abstract  # type: ignore
import twisted.internet.asyncioreactor  # type: ignore
from twisted.internet.defer import Deferred  # type: ignore
from twisted.python import failure  # type: ignore
import twisted.names.cache  # type: ignore
import twisted.names.client  # type: ignore
import twisted.names.error  # type: ignore  (needed below for DomainError)
import twisted.names.hosts  # type: ignore
import twisted.names.resolve  # type: ignore


from tornado.concurrent import Future, future_set_exc_info
from tornado.escape import utf8
from tornado import gen
from tornado.netutil import Resolver

import typing

if typing.TYPE_CHECKING:
    from typing import Generator, Any, List, Tuple  # noqa: F401


class TwistedResolver(Resolver):
    """Twisted-based asynchronous resolver.

    This is a non-blocking and non-threaded resolver. It is
    recommended only when threads cannot be used, since it has
    limitations compared to the standard ``getaddrinfo``-based
    `~tornado.netutil.Resolver` and
    `~tornado.netutil.DefaultExecutorResolver`. Specifically, it returns at
    most one result, and arguments other than ``host`` and ``family``
    are ignored. It may fail to resolve when ``family`` is not
    ``socket.AF_UNSPEC``.

    Requires Twisted 12.1 or newer.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """

    def initialize(self) -> None:
        # partial copy of twisted.names.client.createResolver, which doesn't
        # allow for a reactor to be passed in.
        self.reactor = twisted.internet.asyncioreactor.AsyncioSelectorReactor()

        host_resolver = twisted.names.hosts.Resolver("/etc/hosts")
        cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
        real_resolver = twisted.names.client.Resolver(
            "/etc/resolv.conf", reactor=self.reactor
        )
        self.resolver = twisted.names.resolve.ResolverChain(
            [host_resolver, cache_resolver, real_resolver]
        )

    @gen.coroutine
    def resolve(
        self, host: str, port: int, family: int = 0
    ) -> "Generator[Any, Any, List[Tuple[int, Any]]]":
        # getHostByName doesn't accept IP addresses, so if the input
        # looks like an IP address just return it immediately.
        if twisted.internet.abstract.isIPAddress(host):
            resolved = host
            resolved_family = socket.AF_INET
        elif twisted.internet.abstract.isIPv6Address(host):
            resolved = host
            resolved_family = socket.AF_INET6
        else:
            deferred = self.resolver.getHostByName(utf8(host))
            fut = Future()  # type: Future[Any]
            deferred.addBoth(fut.set_result)
            resolved = yield fut
            if isinstance(resolved, failure.Failure):
                try:
                    resolved.raiseException()
                except twisted.names.error.DomainError as e:
                    raise IOError(e)
            elif twisted.internet.abstract.isIPAddress(resolved):
                resolved_family = socket.AF_INET
            elif twisted.internet.abstract.isIPv6Address(resolved):
                resolved_family = socket.AF_INET6
            else:
                resolved_family = socket.AF_UNSPEC
        if family != socket.AF_UNSPEC and family != resolved_family:
            raise Exception(
                "Requested socket family %d but got %d" % (family, resolved_family)
            )
        result = [(typing.cast(int, resolved_family), (resolved, port))]
        return result


def install() -> None:
    """Install ``AsyncioSelectorReactor`` as the default Twisted reactor.

    .. deprecated:: 5.1

       This function is provided for backwards compatibility; code
       that does not require compatibility with older versions of
       Tornado should use
       ``twisted.internet.asyncioreactor.install()`` directly.

    .. versionchanged:: 6.0.3

       In Tornado 5.x and before, this function installed a reactor
       based on the Tornado ``IOLoop``. When that reactor
       implementation was removed in Tornado 6.0.0, this function was
       removed as well. It was restored in Tornado 6.0.3 using the
       ``asyncio`` reactor instead.

    """
    from twisted.internet.asyncioreactor import install

    install()


if hasattr(gen.convert_yielded, "register"):

    @gen.convert_yielded.register(Deferred)  # type: ignore
    def _(d: Deferred) -> Future:
        f = Future()  # type: Future[Any]

        def errback(failure: failure.Failure) -> None:
            try:
                failure.raiseException()
                # Should never happen, but just in case
                raise Exception("errback called without error")
            except:
                future_set_exc_info(f, sys.exc_info())

        d.addCallbacks(f.set_result, errback)
        return f
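
A minimal sketch of the ``convert_yielded`` registration at the end of the file (not part of the file; assumes Twisted is installed, and the 0.1s delay and payload string are illustrative). Because importing ``tornado.platform.twisted`` registers ``Deferred`` with ``gen.convert_yielded``, a bare Deferred can be yielded from a ``@gen.coroutine``:

    import tornado.platform.twisted  # noqa: F401  (side effect: registers Deferred)
    from tornado import gen
    from tornado.ioloop import IOLoop
    from twisted.internet.defer import Deferred

    @gen.coroutine
    def main():
        d = Deferred()
        # Fire the Deferred from the Tornado side after a short delay.
        IOLoop.current().call_later(0.1, d.callback, "from twisted")
        result = yield d  # works via the registration above
        print(result)

    IOLoop.current().run_sync(main)
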
373
venv/Lib/site-packages/tornado/process.py
Normal file
@@ -0,0 +1,373 @@
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities for working with multiple processes, including both forking
the server into multiple processes and managing subprocesses.
"""

import os
import multiprocessing
import signal
import subprocess
import sys
import time

from binascii import hexlify

from tornado.concurrent import (
    Future,
    future_set_result_unless_cancelled,
    future_set_exception_unless_cancelled,
)
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log

import typing
from typing import Optional, Any, Callable

if typing.TYPE_CHECKING:
    from typing import List  # noqa: F401

# Re-export this exception for convenience.
CalledProcessError = subprocess.CalledProcessError


def cpu_count() -> int:
    """Returns the number of processors on this machine."""
    if multiprocessing is None:
        return 1
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass
    try:
        return os.sysconf("SC_NPROCESSORS_CONF")  # type: ignore
    except (AttributeError, ValueError):
        pass
    gen_log.error("Could not detect number of processors; assuming 1")
    return 1


def _reseed_random() -> None:
    if "random" not in sys.modules:
        return
    import random

    # If os.urandom is available, this method does the same thing as
    # random.seed (at least as of python 2.6). If os.urandom is not
    # available, we mix in the pid in addition to a timestamp.
    try:
        seed = int(hexlify(os.urandom(16)), 16)
    except NotImplementedError:
        seed = int(time.time() * 1000) ^ os.getpid()
    random.seed(seed)


_task_id = None


def fork_processes(
    num_processes: Optional[int], max_restarts: Optional[int] = None
) -> int:
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes``. Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times). In the parent
    process, ``fork_processes`` calls ``sys.exit(0)`` after all child
    processes have exited normally.

    max_restarts defaults to 100.

    Availability: Unix
    """
    if sys.platform == "win32":
        # The exact form of this condition matters to mypy; it understands
        # if but not assert in this context.
        raise Exception("fork not available on windows")
    if max_restarts is None:
        max_restarts = 100

    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i: int) -> Optional[int]:
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    num_restarts = 0
    while children:
        pid, status = os.wait()
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning(
                "child %d (pid %d) killed by signal %d, restarting",
                id,
                pid,
                os.WTERMSIG(status),
            )
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning(
                "child %d (pid %d) exited with status %d, restarting",
                id,
                pid,
                os.WEXITSTATUS(status),
            )
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)


def task_id() -> Optional[int]:
    """Returns the current task id, if any.

    Returns None if this process was not created by `fork_processes`.
    """
    global _task_id
    return _task_id
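
A minimal pre-fork sketch for ``fork_processes`` and ``task_id`` above (not part of the file; Unix only, and the port number is illustrative). The socket is bound once before forking and then shared by all children, each of which runs its own IOLoop, as the docstring requires:

    from tornado import httpserver, ioloop, netutil, process, web

    class MainHandler(web.RequestHandler):
        def get(self):
            self.write("served by task %s\n" % process.task_id())

    app = web.Application([(r"/", MainHandler)])
    sockets = netutil.bind_sockets(8888)  # bind once, before forking
    process.fork_processes(0)             # 0 -> one child per CPU core
    # From here on we are in a child process.
    server = httpserver.HTTPServer(app)
    server.add_sockets(sockets)           # children share the listening socket
    ioloop.IOLoop.current().start()
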

class Subprocess(object):
    """Wraps ``subprocess.Popen`` with IOStream support.

    The constructor is the same as ``subprocess.Popen`` with the following
    additions:

    * ``stdin``, ``stdout``, and ``stderr`` may have the value
      ``tornado.process.Subprocess.STREAM``, which will make the corresponding
      attribute of the resulting Subprocess a `.PipeIOStream`. If this option
      is used, the caller is responsible for closing the streams when done
      with them.

    The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
    ``wait_for_exit`` methods do not work on Windows. There is
    therefore no reason to use this class instead of
    ``subprocess.Popen`` on that platform.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

    """

    STREAM = object()

    _initialized = False
    _waiting = {}  # type: ignore
    _old_sigchld = None

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.io_loop = ioloop.IOLoop.current()
        # All FDs we create should be closed on error; those in to_close
        # should be closed in the parent process on success.
        pipe_fds = []  # type: List[int]
        to_close = []  # type: List[int]
        if kwargs.get("stdin") is Subprocess.STREAM:
            in_r, in_w = os.pipe()
            kwargs["stdin"] = in_r
            pipe_fds.extend((in_r, in_w))
            to_close.append(in_r)
            self.stdin = PipeIOStream(in_w)
        if kwargs.get("stdout") is Subprocess.STREAM:
            out_r, out_w = os.pipe()
            kwargs["stdout"] = out_w
            pipe_fds.extend((out_r, out_w))
            to_close.append(out_w)
            self.stdout = PipeIOStream(out_r)
        if kwargs.get("stderr") is Subprocess.STREAM:
            err_r, err_w = os.pipe()
            kwargs["stderr"] = err_w
            pipe_fds.extend((err_r, err_w))
            to_close.append(err_w)
            self.stderr = PipeIOStream(err_r)
        try:
            self.proc = subprocess.Popen(*args, **kwargs)
        except:
            for fd in pipe_fds:
                os.close(fd)
            raise
        for fd in to_close:
            os.close(fd)
        self.pid = self.proc.pid
        for attr in ["stdin", "stdout", "stderr"]:
            if not hasattr(self, attr):  # don't clobber streams set above
                setattr(self, attr, getattr(self.proc, attr))
        self._exit_callback = None  # type: Optional[Callable[[int], None]]
        self.returncode = None  # type: Optional[int]

    def set_exit_callback(self, callback: Callable[[int], None]) -> None:
        """Runs ``callback`` when this process exits.

        The callback takes one argument, the return code of the process.

        This method uses a ``SIGCHLD`` handler, which is a global setting
        and may conflict if you have other libraries trying to handle the
        same signal. If you are using more than one ``IOLoop`` it may
        be necessary to call `Subprocess.initialize` first to designate
        one ``IOLoop`` to run the signal handlers.

        In many cases a close callback on the stdout or stderr streams
        can be used as an alternative to an exit callback if the
        signal handler is causing a problem.

        Availability: Unix
        """
        self._exit_callback = callback
        Subprocess.initialize()
        Subprocess._waiting[self.pid] = self
        Subprocess._try_cleanup_process(self.pid)

    def wait_for_exit(self, raise_error: bool = True) -> "Future[int]":
        """Returns a `.Future` which resolves when the process exits.

        Usage::

            ret = yield proc.wait_for_exit()

        This is a coroutine-friendly alternative to `set_exit_callback`
        (and a replacement for the blocking `subprocess.Popen.wait`).

        By default, raises `subprocess.CalledProcessError` if the process
        has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
        to suppress this behavior and return the exit status without raising.

        .. versionadded:: 4.2

        Availability: Unix
        """
        future = Future()  # type: Future[int]

        def callback(ret: int) -> None:
            if ret != 0 and raise_error:
                # Unfortunately we don't have the original args any more.
                future_set_exception_unless_cancelled(
                    future, CalledProcessError(ret, "unknown")
                )
            else:
                future_set_result_unless_cancelled(future, ret)

        self.set_exit_callback(callback)
        return future

    @classmethod
    def initialize(cls) -> None:
        """Initializes the ``SIGCHLD`` handler.

        The signal handler is run on an `.IOLoop` to avoid locking issues.
        Note that the `.IOLoop` used for signal handling need not be the
        same one used by individual Subprocess objects (as long as the
        ``IOLoops`` are each running in separate threads).

        .. versionchanged:: 5.0
           The ``io_loop`` argument (deprecated since version 4.1) has been
           removed.

        Availability: Unix
        """
        if cls._initialized:
            return
        io_loop = ioloop.IOLoop.current()
        cls._old_sigchld = signal.signal(
            signal.SIGCHLD,
            lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup),
        )
        cls._initialized = True

    @classmethod
    def uninitialize(cls) -> None:
        """Removes the ``SIGCHLD`` handler."""
        if not cls._initialized:
            return
        signal.signal(signal.SIGCHLD, cls._old_sigchld)
        cls._initialized = False

    @classmethod
    def _cleanup(cls) -> None:
        for pid in list(cls._waiting.keys()):  # make a copy
            cls._try_cleanup_process(pid)

    @classmethod
    def _try_cleanup_process(cls, pid: int) -> None:
        try:
            ret_pid, status = os.waitpid(pid, os.WNOHANG)  # type: ignore
        except ChildProcessError:
            return
        if ret_pid == 0:
            return
        assert ret_pid == pid
        subproc = cls._waiting.pop(pid)
        subproc.io_loop.add_callback_from_signal(subproc._set_returncode, status)

    def _set_returncode(self, status: int) -> None:
        if sys.platform == "win32":
            self.returncode = -1
        else:
            if os.WIFSIGNALED(status):
                self.returncode = -os.WTERMSIG(status)
            else:
                assert os.WIFEXITED(status)
                self.returncode = os.WEXITSTATUS(status)
        # We've taken over wait() duty from the subprocess.Popen
        # object. If we don't inform it of the process's return code,
        # it will log a warning at destruction in python 3.6+.
        self.proc.returncode = self.returncode
        if self._exit_callback:
            callback = self._exit_callback
            self._exit_callback = None
            callback(self.returncode)
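
A minimal usage sketch for ``Subprocess.STREAM`` and ``wait_for_exit`` above (not part of the file; Unix only, and the echo command is illustrative). ``STREAM`` turns the child's stdout into a `.PipeIOStream` readable with coroutines, and ``wait_for_exit`` replaces the blocking ``Popen.wait``:

    from tornado.ioloop import IOLoop
    from tornado.process import Subprocess

    async def run():
        proc = Subprocess(["echo", "hello"], stdout=Subprocess.STREAM)
        output = await proc.stdout.read_until_close()
        ret = await proc.wait_for_exit()
        print(output, ret)  # b'hello\n' 0

    IOLoop.current().run_sync(run)
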
0
venv/Lib/site-packages/tornado/py.typed
Normal file
414
venv/Lib/site-packages/tornado/queues.py
Normal file
@@ -0,0 +1,414 @@
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Asynchronous queues for coroutines. These classes are very similar
to those provided in the standard library's `asyncio package
<https://docs.python.org/3/library/asyncio-queue.html>`_.

.. warning::

   Unlike the standard library's `queue` module, the classes defined here
   are *not* thread-safe. To use these queues from another thread,
   use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
   before calling any queue methods.

"""

import collections
import datetime
import heapq

from tornado import gen, ioloop
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.locks import Event

from typing import Union, TypeVar, Generic, Awaitable, Optional
import typing

if typing.TYPE_CHECKING:
    from typing import Deque, Tuple, Any  # noqa: F401

_T = TypeVar("_T")

__all__ = ["Queue", "PriorityQueue", "LifoQueue", "QueueFull", "QueueEmpty"]


class QueueEmpty(Exception):
    """Raised by `.Queue.get_nowait` when the queue has no items."""

    pass


class QueueFull(Exception):
    """Raised by `.Queue.put_nowait` when a queue is at its maximum size."""

    pass


def _set_timeout(
    future: Future, timeout: Union[None, float, datetime.timedelta]
) -> None:
    if timeout:

        def on_timeout() -> None:
            if not future.done():
                future.set_exception(gen.TimeoutError())

        io_loop = ioloop.IOLoop.current()
        timeout_handle = io_loop.add_timeout(timeout, on_timeout)
        future.add_done_callback(lambda _: io_loop.remove_timeout(timeout_handle))


class _QueueIterator(Generic[_T]):
    def __init__(self, q: "Queue[_T]") -> None:
        self.q = q

    def __anext__(self) -> Awaitable[_T]:
        return self.q.get()


class Queue(Generic[_T]):
    """Coordinate producer and consumer coroutines.

    If maxsize is 0 (the default) the queue size is unbounded.

    .. testcode::

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.queues import Queue

        q = Queue(maxsize=2)

        async def consumer():
            async for item in q:
                try:
                    print('Doing work on %s' % item)
                    await gen.sleep(0.01)
                finally:
                    q.task_done()

        async def producer():
            for item in range(5):
                await q.put(item)
                print('Put %s' % item)

        async def main():
            # Start consumer without waiting (since it never finishes).
            IOLoop.current().spawn_callback(consumer)
            await producer()  # Wait for producer to put all tasks.
            await q.join()  # Wait for consumer to finish all tasks.
            print('Done')

        IOLoop.current().run_sync(main)

    .. testoutput::

        Put 0
        Put 1
        Doing work on 0
        Put 2
        Doing work on 1
        Put 3
        Doing work on 2
        Put 4
        Doing work on 3
        Doing work on 4
        Done


    In versions of Python without native coroutines (before 3.5),
    ``consumer()`` could be written as::

        @gen.coroutine
        def consumer():
            while True:
                item = yield q.get()
                try:
                    print('Doing work on %s' % item)
                    yield gen.sleep(0.01)
                finally:
                    q.task_done()

    .. versionchanged:: 4.3
       Added ``async for`` support in Python 3.5.

    """

    # Exact type depends on subclass. Could be another generic
    # parameter and use protocols to be more precise here.
    _queue = None  # type: Any

    def __init__(self, maxsize: int = 0) -> None:
        if maxsize is None:
            raise TypeError("maxsize can't be None")

        if maxsize < 0:
            raise ValueError("maxsize can't be negative")

        self._maxsize = maxsize
        self._init()
        self._getters = collections.deque([])  # type: Deque[Future[_T]]
        self._putters = collections.deque([])  # type: Deque[Tuple[_T, Future[None]]]
        self._unfinished_tasks = 0
        self._finished = Event()
        self._finished.set()

    @property
    def maxsize(self) -> int:
        """Number of items allowed in the queue."""
        return self._maxsize

    def qsize(self) -> int:
        """Number of items in the queue."""
        return len(self._queue)

    def empty(self) -> bool:
        return not self._queue

    def full(self) -> bool:
        if self.maxsize == 0:
            return False
        else:
            return self.qsize() >= self.maxsize

    def put(
        self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None
    ) -> "Future[None]":
        """Put an item into the queue, perhaps waiting until there is room.

        Returns a Future, which raises `tornado.util.TimeoutError` after a
        timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.
        """
        future = Future()  # type: Future[None]
        try:
            self.put_nowait(item)
        except QueueFull:
            self._putters.append((item, future))
            _set_timeout(future, timeout)
        else:
            future.set_result(None)
        return future

    def put_nowait(self, item: _T) -> None:
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise `QueueFull`.
        """
        self._consume_expired()
        if self._getters:
            assert self.empty(), "queue non-empty, why are getters waiting?"
            getter = self._getters.popleft()
            self.__put_internal(item)
            future_set_result_unless_cancelled(getter, self._get())
        elif self.full():
            raise QueueFull
        else:
            self.__put_internal(item)

    def get(
        self, timeout: Optional[Union[float, datetime.timedelta]] = None
    ) -> Awaitable[_T]:
        """Remove and return an item from the queue.

        Returns an awaitable which resolves once an item is available, or raises
        `tornado.util.TimeoutError` after a timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.

        .. note::

           The ``timeout`` argument of this method differs from that
           of the standard library's `queue.Queue.get`. That method
           interprets numeric values as relative timeouts; this one
           interprets them as absolute deadlines and requires
           ``timedelta`` objects for relative timeouts (consistent
           with other timeouts in Tornado).

        """
        future = Future()  # type: Future[_T]
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future

    def get_nowait(self) -> _T:
        """Remove and return an item from the queue without blocking.

        Return an item if one is immediately available, else raise
        `QueueEmpty`.
        """
        self._consume_expired()
        if self._putters:
            assert self.full(), "queue not full, why are putters waiting?"
            item, putter = self._putters.popleft()
            self.__put_internal(item)
            future_set_result_unless_cancelled(putter, None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty

    def task_done(self) -> None:
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each `.get` used to fetch a task, a
        subsequent call to `.task_done` tells the queue that the processing
        on the task is complete.

        If a `.join` is blocking, it resumes when all items have been
        processed; that is, when every `.put` is matched by a `.task_done`.

        Raises `ValueError` if called more times than `.put`.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError("task_done() called too many times")
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    def join(
        self, timeout: Optional[Union[float, datetime.timedelta]] = None
    ) -> Awaitable[None]:
        """Block until all items in the queue are processed.

        Returns an awaitable, which raises `tornado.util.TimeoutError` after a
        timeout.
        """
        return self._finished.wait(timeout)

    def __aiter__(self) -> _QueueIterator[_T]:
        return _QueueIterator(self)

    # These three are overridable in subclasses.
    def _init(self) -> None:
        self._queue = collections.deque()

    def _get(self) -> _T:
        return self._queue.popleft()

    def _put(self, item: _T) -> None:
        self._queue.append(item)

    # End of the overridable methods.

    def __put_internal(self, item: _T) -> None:
        self._unfinished_tasks += 1
        self._finished.clear()
        self._put(item)

    def _consume_expired(self) -> None:
        # Remove timed-out waiters.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()

        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def __repr__(self) -> str:
        return "<%s at %s %s>" % (type(self).__name__, hex(id(self)), self._format())

    def __str__(self) -> str:
        return "<%s %s>" % (type(self).__name__, self._format())

    def _format(self) -> str:
        result = "maxsize=%r" % (self.maxsize,)
        if getattr(self, "_queue", None):
            result += " queue=%r" % self._queue
        if self._getters:
            result += " getters[%s]" % len(self._getters)
        if self._putters:
            result += " putters[%s]" % len(self._putters)
        if self._unfinished_tasks:
            result += " tasks=%s" % self._unfinished_tasks
        return result


class PriorityQueue(Queue):
    """A `.Queue` that retrieves entries in priority order, lowest first.

    Entries are typically tuples like ``(priority number, data)``.

    .. testcode::

        from tornado.queues import PriorityQueue

        q = PriorityQueue()
        q.put((1, 'medium-priority item'))
        q.put((0, 'high-priority item'))
        q.put((10, 'low-priority item'))

        print(q.get_nowait())
        print(q.get_nowait())
        print(q.get_nowait())

    .. testoutput::

        (0, 'high-priority item')
        (1, 'medium-priority item')
        (10, 'low-priority item')
    """

    def _init(self) -> None:
        self._queue = []

    def _put(self, item: _T) -> None:
        heapq.heappush(self._queue, item)

    def _get(self) -> _T:
        return heapq.heappop(self._queue)


class LifoQueue(Queue):
    """A `.Queue` that retrieves the most recently put items first.

    .. testcode::

        from tornado.queues import LifoQueue

        q = LifoQueue()
        q.put(3)
        q.put(2)
        q.put(1)

        print(q.get_nowait())
        print(q.get_nowait())
        print(q.get_nowait())

    .. testoutput::

        1
        2
        3
    """

    def _init(self) -> None:
        self._queue = []

    def _put(self, item: _T) -> None:
        self._queue.append(item)

    def _get(self) -> _T:
        return self._queue.pop()
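
A minimal sketch of the timeout semantics described in the ``Queue.get`` note above (not part of the file; the 0.1s values are illustrative). A plain number is an absolute deadline on the IOLoop clock, while a ``timedelta`` is relative to now:

    import datetime
    from tornado.ioloop import IOLoop
    from tornado.queues import Queue
    from tornado.util import TimeoutError

    async def main():
        q = Queue()
        try:
            # Relative: give up 0.1 seconds from now.
            await q.get(timeout=datetime.timedelta(seconds=0.1))
        except TimeoutError:
            print("relative timeout expired")
        try:
            # Absolute: a deadline on the IOLoop's clock.
            await q.get(timeout=IOLoop.current().time() + 0.1)
        except TimeoutError:
            print("absolute deadline passed")

    IOLoop.current().run_sync(main)
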
717
venv/Lib/site-packages/tornado/routing.py
Normal file
@@ -0,0 +1,717 @@
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Flexible routing implementation.

Tornado routes HTTP requests to appropriate handlers using `Router`
class implementations. The `tornado.web.Application` class is a
`Router` implementation and may be used directly, or the classes in
this module may be used for additional flexibility. The `RuleRouter`
class can match on more criteria than `.Application`, or the `Router`
interface can be subclassed for maximum customization.

The `Router` interface extends `~.httputil.HTTPServerConnectionDelegate`
to provide additional routing capabilities. This also means that any
`Router` implementation can be used directly as a ``request_callback``
for the `~.httpserver.HTTPServer` constructor.

A `Router` subclass must implement a ``find_handler`` method to provide
a suitable `~.httputil.HTTPMessageDelegate` instance to handle the
request:

.. code-block:: python

    class CustomRouter(Router):
        def find_handler(self, request, **kwargs):
            # some routing logic providing a suitable HTTPMessageDelegate instance
            return MessageDelegate(request.connection)

    class MessageDelegate(HTTPMessageDelegate):
        def __init__(self, connection):
            self.connection = connection

        def finish(self):
            self.connection.write_headers(
                ResponseStartLine("HTTP/1.1", 200, "OK"),
                HTTPHeaders({"Content-Length": "2"}),
                b"OK")
            self.connection.finish()

    router = CustomRouter()
    server = HTTPServer(router)

The main responsibility of a `Router` implementation is to provide a
mapping from a request to a `~.httputil.HTTPMessageDelegate` instance
that will handle this request. In the example above we can see that
routing is possible even without instantiating an `~.web.Application`.

For routing to `~.web.RequestHandler` implementations we need an
`~.web.Application` instance. `~.web.Application.get_handler_delegate`
provides a convenient way to create a `~.httputil.HTTPMessageDelegate`
for a given request and `~.web.RequestHandler`.

Here is a simple example of how we can route to
`~.web.RequestHandler` subclasses by HTTP method:

.. code-block:: python

    resources = {}

    class GetResource(RequestHandler):
        def get(self, path):
            if path not in resources:
                raise HTTPError(404)

            self.finish(resources[path])

    class PostResource(RequestHandler):
        def post(self, path):
            resources[path] = self.request.body

    class HTTPMethodRouter(Router):
        def __init__(self, app):
            self.app = app

        def find_handler(self, request, **kwargs):
            handler = GetResource if request.method == "GET" else PostResource
            return self.app.get_handler_delegate(request, handler, path_args=[request.path])

    router = HTTPMethodRouter(Application())
    server = HTTPServer(router)

The `ReversibleRouter` interface adds the ability to distinguish between
the routes and reverse them to the original urls using a route's name
and additional arguments. `~.web.Application` is itself an
implementation of the `ReversibleRouter` class.

`RuleRouter` and `ReversibleRuleRouter` are implementations of the
`Router` and `ReversibleRouter` interfaces and can be used for
creating rule-based routing configurations.

Rules are instances of the `Rule` class. They contain a `Matcher`, which
provides the logic for determining whether the rule is a match for a
particular request, and a target, which can be one of the following.

1) An instance of `~.httputil.HTTPServerConnectionDelegate`:

.. code-block:: python

    router = RuleRouter([
        Rule(PathMatches("/handler"), ConnectionDelegate()),
        # ... more rules
    ])

    class ConnectionDelegate(HTTPServerConnectionDelegate):
        def start_request(self, server_conn, request_conn):
            return MessageDelegate(request_conn)

2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type:

.. code-block:: python

    router = RuleRouter([
        Rule(PathMatches("/callable"), request_callable)
    ])

    def request_callable(request):
        request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK")
        request.finish()

3) Another `Router` instance:

.. code-block:: python

    router = RuleRouter([
        Rule(PathMatches("/router.*"), CustomRouter())
    ])

Of course a nested `RuleRouter` or a `~.web.Application` is allowed:

.. code-block:: python

    router = RuleRouter([
        Rule(HostMatches("example.com"), RuleRouter([
            Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])),
        ]))
    ])

    server = HTTPServer(router)

In the example below `RuleRouter` is used to route between applications:

.. code-block:: python

    app1 = Application([
        (r"/app1/handler", Handler1),
        # other handlers ...
    ])

    app2 = Application([
        (r"/app2/handler", Handler2),
        # other handlers ...
    ])

    router = RuleRouter([
        Rule(PathMatches("/app1.*"), app1),
        Rule(PathMatches("/app2.*"), app2)
    ])

    server = HTTPServer(router)

For more information on application-level routing see docs for `~.web.Application`.

.. versionadded:: 4.5

"""

import re
from functools import partial

from tornado import httputil
from tornado.httpserver import _CallableAdapter
from tornado.escape import url_escape, url_unescape, utf8
from tornado.log import app_log
from tornado.util import basestring_type, import_object, re_unescape, unicode_type

from typing import Any, Union, Optional, Awaitable, List, Dict, Pattern, Tuple, overload


class Router(httputil.HTTPServerConnectionDelegate):
    """Abstract router interface."""

    def find_handler(
        self, request: httputil.HTTPServerRequest, **kwargs: Any
    ) -> Optional[httputil.HTTPMessageDelegate]:
        """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
        that can serve the request.
        Routing implementations may pass additional kwargs to extend the routing logic.

        :arg httputil.HTTPServerRequest request: current HTTP request.
        :arg kwargs: additional keyword arguments passed by routing implementation.
        :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
            process the request.
        """
        raise NotImplementedError()

    def start_request(
        self, server_conn: object, request_conn: httputil.HTTPConnection
    ) -> httputil.HTTPMessageDelegate:
        return _RoutingDelegate(self, server_conn, request_conn)


class ReversibleRouter(Router):
    """Abstract router interface for routers that can handle named routes
    and support reversing them to original urls.
    """

    def reverse_url(self, name: str, *args: Any) -> Optional[str]:
        """Returns url string for a given route name and arguments
        or ``None`` if no match is found.

        :arg str name: route name.
        :arg args: url parameters.
        :returns: parametrized url string for a given route name (or ``None``).
        """
        raise NotImplementedError()


class _RoutingDelegate(httputil.HTTPMessageDelegate):
    def __init__(
        self, router: Router, server_conn: object, request_conn: httputil.HTTPConnection
    ) -> None:
        self.server_conn = server_conn
        self.request_conn = request_conn
        self.delegate = None  # type: Optional[httputil.HTTPMessageDelegate]
        self.router = router  # type: Router

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        assert isinstance(start_line, httputil.RequestStartLine)
        request = httputil.HTTPServerRequest(
            connection=self.request_conn,
            server_connection=self.server_conn,
            start_line=start_line,
            headers=headers,
        )

        self.delegate = self.router.find_handler(request)
        if self.delegate is None:
            app_log.debug(
                "Delegate for %s %s request not found",
                start_line.method,
                start_line.path,
            )
            self.delegate = _DefaultMessageDelegate(self.request_conn)

        return self.delegate.headers_received(start_line, headers)

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        assert self.delegate is not None
        return self.delegate.data_received(chunk)

    def finish(self) -> None:
        assert self.delegate is not None
        self.delegate.finish()

    def on_connection_close(self) -> None:
        assert self.delegate is not None
        self.delegate.on_connection_close()


class _DefaultMessageDelegate(httputil.HTTPMessageDelegate):
    def __init__(self, connection: httputil.HTTPConnection) -> None:
        self.connection = connection

    def finish(self) -> None:
        self.connection.write_headers(
            httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"),
            httputil.HTTPHeaders(),
        )
        self.connection.finish()


# _RuleList can either contain pre-constructed Rules or a sequence of
# arguments to be passed to the Rule constructor.
_RuleList = List[
    Union[
        "Rule",
        List[Any],  # Can't do detailed typechecking of lists.
        Tuple[Union[str, "Matcher"], Any],
        Tuple[Union[str, "Matcher"], Any, Dict[str, Any]],
        Tuple[Union[str, "Matcher"], Any, Dict[str, Any], str],
    ]
]


class RuleRouter(Router):
    """Rule-based router implementation."""

    def __init__(self, rules: Optional[_RuleList] = None) -> None:
        """Constructs a router from an ordered list of rules::

            RuleRouter([
                Rule(PathMatches("/handler"), Target),
                # ... more rules
            ])

        You can also omit the explicit `Rule` constructor and use tuples of arguments::

            RuleRouter([
                (PathMatches("/handler"), Target),
            ])

        `PathMatches` is a default matcher, so the example above can be simplified::

            RuleRouter([
                ("/handler", Target),
            ])

        In the examples above, ``Target`` can be a nested `Router` instance, an instance of
        `~.httputil.HTTPServerConnectionDelegate` or an old-style callable,
        accepting a request argument.

        :arg rules: a list of `Rule` instances or tuples of `Rule`
            constructor arguments.
        """
        self.rules = []  # type: List[Rule]
        if rules:
            self.add_rules(rules)

    def add_rules(self, rules: _RuleList) -> None:
        """Appends new rules to the router.

        :arg rules: a list of Rule instances (or tuples of arguments, which are
            passed to Rule constructor).
        """
        for rule in rules:
            if isinstance(rule, (tuple, list)):
                assert len(rule) in (2, 3, 4)
                if isinstance(rule[0], basestring_type):
                    rule = Rule(PathMatches(rule[0]), *rule[1:])
                else:
                    rule = Rule(*rule)

            self.rules.append(self.process_rule(rule))

    def process_rule(self, rule: "Rule") -> "Rule":
        """Override this method for additional preprocessing of each rule.

        :arg Rule rule: a rule to be processed.
        :returns: the same or modified Rule instance.
        """
        return rule

    def find_handler(
        self, request: httputil.HTTPServerRequest, **kwargs: Any
    ) -> Optional[httputil.HTTPMessageDelegate]:
        for rule in self.rules:
            target_params = rule.matcher.match(request)
            if target_params is not None:
                if rule.target_kwargs:
                    target_params["target_kwargs"] = rule.target_kwargs

                delegate = self.get_target_delegate(
                    rule.target, request, **target_params
                )

                if delegate is not None:
                    return delegate

        return None

    def get_target_delegate(
        self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any
    ) -> Optional[httputil.HTTPMessageDelegate]:
        """Returns an instance of `~.httputil.HTTPMessageDelegate` for a
        Rule's target. This method is called by `~.find_handler` and can be
        extended to provide additional target types.

        :arg target: a Rule's target.
        :arg httputil.HTTPServerRequest request: current request.
        :arg target_params: additional parameters that can be useful
            for `~.httputil.HTTPMessageDelegate` creation.
        """
        if isinstance(target, Router):
            return target.find_handler(request, **target_params)

        elif isinstance(target, httputil.HTTPServerConnectionDelegate):
            assert request.connection is not None
            return target.start_request(request.server_connection, request.connection)

        elif callable(target):
            assert request.connection is not None
            return _CallableAdapter(
                partial(target, **target_params), request.connection
            )

        return None


class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
    """A rule-based router that implements the ``reverse_url`` method.

    Each rule added to this router may have a ``name`` attribute that can be
    used to reconstruct an original uri. The actual reconstruction takes place
    in a rule's matcher (see `Matcher.reverse`).
    """

    def __init__(self, rules: Optional[_RuleList] = None) -> None:
        self.named_rules = {}  # type: Dict[str, Any]
        super().__init__(rules)

    def process_rule(self, rule: "Rule") -> "Rule":
        rule = super().process_rule(rule)

        if rule.name:
            if rule.name in self.named_rules:
                app_log.warning(
                    "Multiple handlers named %s; replacing previous value", rule.name
                )
            self.named_rules[rule.name] = rule

        return rule

    def reverse_url(self, name: str, *args: Any) -> Optional[str]:
        if name in self.named_rules:
            return self.named_rules[name].matcher.reverse(*args)

        for rule in self.rules:
            if isinstance(rule.target, ReversibleRouter):
                reversed_url = rule.target.reverse_url(name, *args)
                if reversed_url is not None:
                    return reversed_url

        return None


class Rule(object):
    """A routing rule."""

    def __init__(
        self,
        matcher: "Matcher",
        target: Any,
        target_kwargs: Optional[Dict[str, Any]] = None,
        name: Optional[str] = None,
    ) -> None:
        """Constructs a Rule instance.

        :arg Matcher matcher: a `Matcher` instance used for determining
            whether the rule should be considered a match for a specific
            request.
        :arg target: a Rule's target (typically a ``RequestHandler`` or
            `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`,
            depending on routing implementation).
        :arg dict target_kwargs: a dict of parameters that can be useful
            at the moment of target instantiation (for example, ``status_code``
            for a ``RequestHandler`` subclass). They end up in
            ``target_params['target_kwargs']`` of the `RuleRouter.get_target_delegate`
            method.
        :arg str name: the name of the rule that can be used to find it
            in a `ReversibleRouter.reverse_url` implementation.
        """
        if isinstance(target, str):
            # import the Module and instantiate the class
            # Must be a fully qualified name (module.ClassName)
            target = import_object(target)

        self.matcher = matcher  # type: Matcher
        self.target = target
        self.target_kwargs = target_kwargs if target_kwargs else {}
        self.name = name

    def reverse(self, *args: Any) -> Optional[str]:
        return self.matcher.reverse(*args)

    def __repr__(self) -> str:
        return "%s(%r, %s, kwargs=%r, name=%r)" % (
            self.__class__.__name__,
            self.matcher,
            self.target,
            self.target_kwargs,
            self.name,
        )


class Matcher(object):
    """Represents a matcher for request features."""

    def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]:
        """Matches current instance against the request.

        :arg httputil.HTTPServerRequest request: current HTTP request
        :returns: a dict of parameters to be passed to the target handler
            (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs``
            can be passed for proper `~.web.RequestHandler` instantiation).
            An empty dict is a valid (and common) return value to indicate a match
            when the argument-passing features are not used.
            ``None`` must be returned to indicate that there is no match."""
        raise NotImplementedError()

    def reverse(self, *args: Any) -> Optional[str]:
        """Reconstructs full url from matcher instance and additional arguments."""
        return None


class AnyMatches(Matcher):
    """Matches any request."""

    def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]:
        return {}


class HostMatches(Matcher):
    """Matches requests from hosts specified by ``host_pattern`` regex."""

    def __init__(self, host_pattern: Union[str, Pattern]) -> None:
        if isinstance(host_pattern, basestring_type):
            if not host_pattern.endswith("$"):
                host_pattern += "$"
            self.host_pattern = re.compile(host_pattern)
        else:
            self.host_pattern = host_pattern

    def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]:
        if self.host_pattern.match(request.host_name):
            return {}

        return None
class DefaultHostMatches(Matcher):
|
||||
"""Matches requests from host that is equal to application's default_host.
|
||||
Always returns no match if ``X-Real-Ip`` header is present.
|
||||
"""
|
||||
|
||||
def __init__(self, application: Any, host_pattern: Pattern) -> None:
|
||||
self.application = application
|
||||
self.host_pattern = host_pattern
|
||||
|
||||
def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]:
|
||||
# Look for default host if not behind load balancer (for debugging)
|
||||
if "X-Real-Ip" not in request.headers:
|
||||
if self.host_pattern.match(self.application.default_host):
|
||||
return {}
|
||||
return None
|
||||
|
||||
|
||||
class PathMatches(Matcher):
|
||||
"""Matches requests with paths specified by ``path_pattern`` regex."""
|
||||
|
||||
def __init__(self, path_pattern: Union[str, Pattern]) -> None:
|
||||
if isinstance(path_pattern, basestring_type):
|
||||
if not path_pattern.endswith("$"):
|
||||
path_pattern += "$"
|
||||
self.regex = re.compile(path_pattern)
|
||||
else:
|
||||
self.regex = path_pattern
|
||||
|
||||
assert len(self.regex.groupindex) in (0, self.regex.groups), (
|
||||
"groups in url regexes must either be all named or all "
|
||||
"positional: %r" % self.regex.pattern
|
||||
)
|
||||
|
||||
self._path, self._group_count = self._find_groups()
|
||||
|
||||
def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]:
|
||||
match = self.regex.match(request.path)
|
||||
if match is None:
|
||||
return None
|
||||
if not self.regex.groups:
|
||||
return {}
|
||||
|
||||
path_args = [] # type: List[bytes]
|
||||
path_kwargs = {} # type: Dict[str, bytes]
|
||||
|
||||
# Pass matched groups to the handler. Since
|
||||
# match.groups() includes both named and
|
||||
# unnamed groups, we want to use either groups
|
||||
# or groupdict but not both.
|
||||
if self.regex.groupindex:
|
||||
path_kwargs = dict(
|
||||
(str(k), _unquote_or_none(v)) for (k, v) in match.groupdict().items()
|
||||
)
|
||||
else:
|
||||
path_args = [_unquote_or_none(s) for s in match.groups()]
|
||||
|
||||
return dict(path_args=path_args, path_kwargs=path_kwargs)
|
||||
|
||||
def reverse(self, *args: Any) -> Optional[str]:
|
||||
if self._path is None:
|
||||
raise ValueError("Cannot reverse url regex " + self.regex.pattern)
|
||||
assert len(args) == self._group_count, (
|
||||
"required number of arguments " "not found"
|
||||
)
|
||||
if not len(args):
|
||||
return self._path
|
||||
converted_args = []
|
||||
for a in args:
|
||||
if not isinstance(a, (unicode_type, bytes)):
|
||||
a = str(a)
|
||||
converted_args.append(url_escape(utf8(a), plus=False))
|
||||
return self._path % tuple(converted_args)
|
||||
|
||||
def _find_groups(self) -> Tuple[Optional[str], Optional[int]]:
|
||||
"""Returns a tuple (reverse string, group count) for a url.
|
||||
|
||||
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
|
||||
would return ('/%s/%s/', 2).
|
||||
"""
|
||||
pattern = self.regex.pattern
|
||||
if pattern.startswith("^"):
|
||||
pattern = pattern[1:]
|
||||
if pattern.endswith("$"):
|
||||
pattern = pattern[:-1]
|
||||
|
||||
if self.regex.groups != pattern.count("("):
|
||||
# The pattern is too complicated for our simplistic matching,
|
||||
# so we can't support reversing it.
|
||||
return None, None
|
||||
|
||||
pieces = []
|
||||
for fragment in pattern.split("("):
|
||||
if ")" in fragment:
|
||||
paren_loc = fragment.index(")")
|
||||
if paren_loc >= 0:
|
||||
try:
|
||||
unescaped_fragment = re_unescape(fragment[paren_loc + 1 :])
|
||||
except ValueError:
|
||||
# If we can't unescape part of it, we can't
|
||||
# reverse this url.
|
||||
return (None, None)
|
||||
pieces.append("%s" + unescaped_fragment)
|
||||
else:
|
||||
try:
|
||||
unescaped_fragment = re_unescape(fragment)
|
||||
except ValueError:
|
||||
# If we can't unescape part of it, we can't
|
||||
# reverse this url.
|
||||
return (None, None)
|
||||
pieces.append(unescaped_fragment)
|
||||
|
||||
return "".join(pieces), self.regex.groups
|
||||
|
||||
|
||||
class URLSpec(Rule):
|
||||
"""Specifies mappings between URLs and handlers.
|
||||
|
||||
.. versionchanged: 4.5
|
||||
`URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for
|
||||
backwards compatibility.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
pattern: Union[str, Pattern],
|
||||
handler: Any,
|
||||
kwargs: Optional[Dict[str, Any]] = None,
|
||||
name: Optional[str] = None,
|
||||
) -> None:
|
||||
"""Parameters:
|
||||
|
||||
* ``pattern``: Regular expression to be matched. Any capturing
|
||||
groups in the regex will be passed in to the handler's
|
||||
get/post/etc methods as arguments (by keyword if named, by
|
||||
position if unnamed. Named and unnamed capturing groups
|
||||
may not be mixed in the same rule).
|
||||
|
||||
* ``handler``: `~.web.RequestHandler` subclass to be invoked.
|
||||
|
||||
* ``kwargs`` (optional): A dictionary of additional arguments
|
||||
to be passed to the handler's constructor.
|
||||
|
||||
* ``name`` (optional): A name for this handler. Used by
|
||||
`~.web.Application.reverse_url`.
|
||||
|
||||
"""
|
||||
matcher = PathMatches(pattern)
|
||||
super().__init__(matcher, handler, kwargs, name)
|
||||
|
||||
self.regex = matcher.regex
|
||||
self.handler_class = self.target
|
||||
self.kwargs = kwargs
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "%s(%r, %s, kwargs=%r, name=%r)" % (
|
||||
self.__class__.__name__,
|
||||
self.regex.pattern,
|
||||
self.handler_class,
|
||||
self.kwargs,
|
||||
self.name,
|
||||
)
|
||||
|
||||
|
||||
@overload
|
||||
def _unquote_or_none(s: str) -> bytes:
|
||||
pass
|
||||
|
||||
|
||||
@overload # noqa: F811
|
||||
def _unquote_or_none(s: None) -> None:
|
||||
pass
|
||||
|
||||
|
||||
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: # noqa: F811
|
||||
"""None-safe wrapper around url_unescape to handle unmatched optional
|
||||
groups correctly.
|
||||
|
||||
Note that args are passed as bytes so the handler can decide what
|
||||
encoding to use.
|
||||
"""
|
||||
if s is None:
|
||||
return s
|
||||
return url_unescape(s, encoding=None, plus=False)
|
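A minimal sketch of how the reversal machinery above composes: `PathMatches._find_groups` precomputes a printf-style template that `reverse` fills in, and `ReversibleRuleRouter.reverse_url` looks rules up by name. The pattern, target, and rule name here are illustrative, not taken from this diff:

    from tornado.routing import PathMatches, ReversibleRuleRouter, Rule

    matcher = PathMatches(r"/post/([0-9]+)")
    # _find_groups() turned the pattern into ('/post/%s', 1), so reverse()
    # is a simple substitution plus url-escaping of each argument:
    assert matcher.reverse(42) == "/post/42"

    # reverse_url consults named_rules first, then recurses into any
    # nested ReversibleRouter targets.  The target is a placeholder here.
    router = ReversibleRuleRouter([Rule(matcher, object(), name="post")])
    assert router.reverse_url("post", 7) == "/post/7"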
699
venv/Lib/site-packages/tornado/simple_httpclient.py
Normal file
@@ -0,0 +1,699 @@
from tornado.escape import _unicode
from tornado import gen, version
from tornado.httpclient import (
    HTTPResponse,
    HTTPError,
    AsyncHTTPClient,
    main,
    _RequestProxy,
    HTTPRequest,
)
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError, IOStream
from tornado.netutil import (
    Resolver,
    OverrideResolver,
    _client_ssl_defaults,
    is_valid_ip,
)
from tornado.log import gen_log
from tornado.tcpclient import TCPClient

import base64
import collections
import copy
import functools
import re
import socket
import ssl
import sys
import time
from io import BytesIO
import urllib.parse

from typing import Dict, Any, Callable, Optional, Type, Union
from types import TracebackType
import typing

if typing.TYPE_CHECKING:
    from typing import Deque, Tuple, List  # noqa: F401


class HTTPTimeoutError(HTTPError):
    """Error raised by SimpleAsyncHTTPClient on timeout.

    For historical reasons, this is a subclass of `.HTTPClientError`
    which simulates a response code of 599.

    .. versionadded:: 5.1
    """

    def __init__(self, message: str) -> None:
        super().__init__(599, message=message)

    def __str__(self) -> str:
        return self.message or "Timeout"


class HTTPStreamClosedError(HTTPError):
    """Error raised by SimpleAsyncHTTPClient when the underlying stream is closed.

    When a more specific exception is available (such as `ConnectionResetError`),
    it may be raised instead of this one.

    For historical reasons, this is a subclass of `.HTTPClientError`
    which simulates a response code of 599.

    .. versionadded:: 5.1
    """

    def __init__(self, message: str) -> None:
        super().__init__(599, message=message)

    def __str__(self) -> str:
        return self.message or "Stream closed"
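Both error classes surface through ``HTTPResponse.error`` (or are raised by ``fetch`` when ``raise_error`` is true) with the synthetic 599 code. A sketch of catching them; the unroutable address and short timeout are placeholders chosen to force a timeout:

    import asyncio
    from tornado.httpclient import AsyncHTTPClient, HTTPClientError

    async def demo():
        client = AsyncHTTPClient()
        try:
            await client.fetch("http://10.255.255.1/", connect_timeout=0.2)
        except HTTPClientError as e:
            # HTTPTimeoutError and HTTPStreamClosedError both report code 599
            print(e.code, e)

    asyncio.run(demo())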
class SimpleAsyncHTTPClient(AsyncHTTPClient):
    """Non-blocking HTTP client with no external dependencies.

    This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
    Some features found in the curl-based AsyncHTTPClient are not yet
    supported. In particular, proxies are not supported, connections
    are not reused, and callers cannot select the network interface to be
    used.
    """

    def initialize(  # type: ignore
        self,
        max_clients: int = 10,
        hostname_mapping: Optional[Dict[str, str]] = None,
        max_buffer_size: int = 104857600,
        resolver: Optional[Resolver] = None,
        defaults: Optional[Dict[str, Any]] = None,
        max_header_size: Optional[int] = None,
        max_body_size: Optional[int] = None,
    ) -> None:
        """Creates an AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        ``force_instance=True`` may be used to suppress this behavior.

        Note that because of this implicit reuse, unless ``force_instance``
        is used, only the first call to the constructor actually uses
        its arguments. It is recommended to use the ``configure`` method
        instead of the constructor to ensure that arguments take effect.

        ``max_clients`` is the number of concurrent requests that can be
        in progress; when this limit is reached additional requests will be
        queued. Note that time spent waiting in this queue still counts
        against the ``request_timeout``.

        ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like ``/etc/hosts`` is not possible or desirable (e.g. in
        unittests).

        ``max_buffer_size`` (default 100MB) is the number of bytes
        that can be read into memory at once. ``max_body_size``
        (defaults to ``max_buffer_size``) is the largest response body
        that the client will accept. Without a
        ``streaming_callback``, the smaller of these two limits
        applies; with a ``streaming_callback`` only ``max_body_size``
        does.

        .. versionchanged:: 4.2
           Added the ``max_body_size`` argument.
        """
        super().initialize(defaults=defaults)
        self.max_clients = max_clients
        self.queue = (
            collections.deque()
        )  # type: Deque[Tuple[object, HTTPRequest, Callable[[HTTPResponse], None]]]
        self.active = (
            {}
        )  # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None]]]
        self.waiting = (
            {}
        )  # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None], object]]
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver()
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(
                resolver=self.resolver, mapping=hostname_mapping
            )
        self.tcp_client = TCPClient(resolver=self.resolver)

    def close(self) -> None:
        super().close()
        if self.own_resolver:
            self.resolver.close()
        self.tcp_client.close()

    def fetch_impl(
        self, request: HTTPRequest, callback: Callable[[HTTPResponse], None]
    ) -> None:
        key = object()
        self.queue.append((key, request, callback))
        assert request.connect_timeout is not None
        assert request.request_timeout is not None
        timeout_handle = None
        if len(self.active) >= self.max_clients:
            timeout = (
                min(request.connect_timeout, request.request_timeout)
                or request.connect_timeout
                or request.request_timeout
            )  # min but skip zero
            if timeout:
                timeout_handle = self.io_loop.add_timeout(
                    self.io_loop.time() + timeout,
                    functools.partial(self._on_timeout, key, "in request queue"),
                )
        self.waiting[key] = (request, callback, timeout_handle)
        self._process_queue()
        if self.queue:
            gen_log.debug(
                "max_clients limit reached, request queued. "
                "%d active, %d queued requests." % (len(self.active), len(self.queue))
            )

    def _process_queue(self) -> None:
        while self.queue and len(self.active) < self.max_clients:
            key, request, callback = self.queue.popleft()
            if key not in self.waiting:
                continue
            self._remove_timeout(key)
            self.active[key] = (request, callback)
            release_callback = functools.partial(self._release_fetch, key)
            self._handle_request(request, release_callback, callback)

    def _connection_class(self) -> type:
        return _HTTPConnection

    def _handle_request(
        self,
        request: HTTPRequest,
        release_callback: Callable[[], None],
        final_callback: Callable[[HTTPResponse], None],
    ) -> None:
        self._connection_class()(
            self,
            request,
            release_callback,
            final_callback,
            self.max_buffer_size,
            self.tcp_client,
            self.max_header_size,
            self.max_body_size,
        )

    def _release_fetch(self, key: object) -> None:
        del self.active[key]
        self._process_queue()

    def _remove_timeout(self, key: object) -> None:
        if key in self.waiting:
            request, callback, timeout_handle = self.waiting[key]
            if timeout_handle is not None:
                self.io_loop.remove_timeout(timeout_handle)
            del self.waiting[key]

    def _on_timeout(self, key: object, info: Optional[str] = None) -> None:
        """Timeout callback of request.

        Construct a timeout HTTPResponse when a timeout occurs.

        :arg object key: A simple object to mark the request.
        :arg str info: More detailed timeout information.
        """
        request, callback, timeout_handle = self.waiting[key]
        self.queue.remove((key, request, callback))

        error_message = "Timeout {0}".format(info) if info else "Timeout"
        timeout_response = HTTPResponse(
            request,
            599,
            error=HTTPTimeoutError(error_message),
            request_time=self.io_loop.time() - request.start_time,
        )
        self.io_loop.add_callback(callback, timeout_response)
        del self.waiting[key]
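A sketch of the ``max_clients`` queueing that ``initialize`` documents; with ``max_clients=1`` the second fetch waits in ``self.queue``, and its wait time counts against its ``request_timeout``. The URLs are placeholders:

    import asyncio
    from tornado.httpclient import AsyncHTTPClient

    async def demo():
        AsyncHTTPClient.configure(
            "tornado.simple_httpclient.SimpleAsyncHTTPClient", max_clients=1
        )
        client = AsyncHTTPClient()
        results = await asyncio.gather(
            client.fetch("http://example.com/"),
            client.fetch("http://example.org/"),  # queued until the first finishes
            return_exceptions=True,
        )
        for r in results:
            print(getattr(r, "code", r))

    asyncio.run(demo())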
class _HTTPConnection(httputil.HTTPMessageDelegate):
    _SUPPORTED_METHODS = set(
        ["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]
    )

    def __init__(
        self,
        client: Optional[SimpleAsyncHTTPClient],
        request: HTTPRequest,
        release_callback: Callable[[], None],
        final_callback: Callable[[HTTPResponse], None],
        max_buffer_size: int,
        tcp_client: TCPClient,
        max_header_size: int,
        max_body_size: int,
    ) -> None:
        self.io_loop = IOLoop.current()
        self.start_time = self.io_loop.time()
        self.start_wall_time = time.time()
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.max_buffer_size = max_buffer_size
        self.tcp_client = tcp_client
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        self.code = None  # type: Optional[int]
        self.headers = None  # type: Optional[httputil.HTTPHeaders]
        self.chunks = []  # type: List[bytes]
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None  # type: object
        self._sockaddr = None
        IOLoop.current().add_future(
            gen.convert_yielded(self.run()), lambda f: f.result()
        )

    async def run(self) -> None:
        try:
            self.parsed = urllib.parse.urlsplit(_unicode(self.request.url))
            if self.parsed.scheme not in ("http", "https"):
                raise ValueError("Unsupported url scheme: %s" % self.request.url)
            # urlsplit results have hostname and port results, but they
            # didn't support ipv6 literals until python 2.7.
            netloc = self.parsed.netloc
            if "@" in netloc:
                userpass, _, netloc = netloc.rpartition("@")
            host, port = httputil.split_host_and_port(netloc)
            if port is None:
                port = 443 if self.parsed.scheme == "https" else 80
            if re.match(r"^\[.*\]$", host):
                # raw ipv6 addresses in urls are enclosed in brackets
                host = host[1:-1]
            self.parsed_hostname = host  # save final host for _on_connect

            if self.request.allow_ipv6 is False:
                af = socket.AF_INET
            else:
                af = socket.AF_UNSPEC

            ssl_options = self._get_ssl_options(self.parsed.scheme)

            source_ip = None
            if self.request.network_interface:
                if is_valid_ip(self.request.network_interface):
                    source_ip = self.request.network_interface
                else:
                    raise ValueError(
                        "Unrecognized IPv4 or IPv6 address for network_interface, got %r"
                        % (self.request.network_interface,)
                    )

            timeout = (
                min(self.request.connect_timeout, self.request.request_timeout)
                or self.request.connect_timeout
                or self.request.request_timeout
            )  # min but skip zero
            if timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + timeout,
                    functools.partial(self._on_timeout, "while connecting"),
                )
            stream = await self.tcp_client.connect(
                host,
                port,
                af=af,
                ssl_options=ssl_options,
                max_buffer_size=self.max_buffer_size,
                source_ip=source_ip,
            )

            if self.final_callback is None:
                # final_callback is cleared if we've hit our timeout.
                stream.close()
                return
            self.stream = stream
            self.stream.set_close_callback(self.on_connection_close)
            self._remove_timeout()
            if self.final_callback is None:
                return
            if self.request.request_timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + self.request.request_timeout,
                    functools.partial(self._on_timeout, "during request"),
                )
            if (
                self.request.method not in self._SUPPORTED_METHODS
                and not self.request.allow_nonstandard_methods
            ):
                raise KeyError("unknown method %s" % self.request.method)
            for key in (
                "proxy_host",
                "proxy_port",
                "proxy_username",
                "proxy_password",
                "proxy_auth_mode",
            ):
                if getattr(self.request, key, None):
                    raise NotImplementedError("%s not supported" % key)
            if "Connection" not in self.request.headers:
                self.request.headers["Connection"] = "close"
            if "Host" not in self.request.headers:
                if "@" in self.parsed.netloc:
                    self.request.headers["Host"] = self.parsed.netloc.rpartition("@")[
                        -1
                    ]
                else:
                    self.request.headers["Host"] = self.parsed.netloc
            username, password = None, None
            if self.parsed.username is not None:
                username, password = self.parsed.username, self.parsed.password
            elif self.request.auth_username is not None:
                username = self.request.auth_username
                password = self.request.auth_password or ""
            if username is not None:
                assert password is not None
                if self.request.auth_mode not in (None, "basic"):
                    raise ValueError("unsupported auth_mode %s", self.request.auth_mode)
                self.request.headers["Authorization"] = "Basic " + _unicode(
                    base64.b64encode(
                        httputil.encode_username_password(username, password)
                    )
                )
            if self.request.user_agent:
                self.request.headers["User-Agent"] = self.request.user_agent
            elif self.request.headers.get("User-Agent") is None:
                self.request.headers["User-Agent"] = "Tornado/{}".format(version)
            if not self.request.allow_nonstandard_methods:
                # Some HTTP methods nearly always have bodies while others
                # almost never do. Fail in this case unless the user has
                # opted out of sanity checks with allow_nonstandard_methods.
                body_expected = self.request.method in ("POST", "PATCH", "PUT")
                body_present = (
                    self.request.body is not None
                    or self.request.body_producer is not None
                )
                if (body_expected and not body_present) or (
                    body_present and not body_expected
                ):
                    raise ValueError(
                        "Body must %sbe None for method %s (unless "
                        "allow_nonstandard_methods is true)"
                        % ("not " if body_expected else "", self.request.method)
                    )
            if self.request.expect_100_continue:
                self.request.headers["Expect"] = "100-continue"
            if self.request.body is not None:
                # When body_producer is used the caller is responsible for
                # setting Content-Length (or else chunked encoding will be used).
                self.request.headers["Content-Length"] = str(len(self.request.body))
                if (
                    self.request.method == "POST"
                    and "Content-Type" not in self.request.headers
                ):
                    self.request.headers[
                        "Content-Type"
                    ] = "application/x-www-form-urlencoded"
            if self.request.decompress_response:
                self.request.headers["Accept-Encoding"] = "gzip"
            req_path = (self.parsed.path or "/") + (
                ("?" + self.parsed.query) if self.parsed.query else ""
            )
            self.connection = self._create_connection(stream)
            start_line = httputil.RequestStartLine(self.request.method, req_path, "")
            self.connection.write_headers(start_line, self.request.headers)
            if self.request.expect_100_continue:
                await self.connection.read_response(self)
            else:
                await self._write_body(True)
        except Exception:
            if not self._handle_exception(*sys.exc_info()):
                raise

    def _get_ssl_options(
        self, scheme: str
    ) -> Union[None, Dict[str, Any], ssl.SSLContext]:
        if scheme == "https":
            if self.request.ssl_options is not None:
                return self.request.ssl_options
            # If we are using the defaults, don't construct a
            # new SSLContext.
            if (
                self.request.validate_cert
                and self.request.ca_certs is None
                and self.request.client_cert is None
                and self.request.client_key is None
            ):
                return _client_ssl_defaults
            ssl_ctx = ssl.create_default_context(
                ssl.Purpose.SERVER_AUTH, cafile=self.request.ca_certs
            )
            if not self.request.validate_cert:
                ssl_ctx.check_hostname = False
                ssl_ctx.verify_mode = ssl.CERT_NONE
            if self.request.client_cert is not None:
                ssl_ctx.load_cert_chain(
                    self.request.client_cert, self.request.client_key
                )
            if hasattr(ssl, "OP_NO_COMPRESSION"):
                # See netutil.ssl_options_to_context
                ssl_ctx.options |= ssl.OP_NO_COMPRESSION
            return ssl_ctx
        return None

    def _on_timeout(self, info: Optional[str] = None) -> None:
        """Timeout callback of _HTTPConnection instance.

        Raise an `HTTPTimeoutError` when a timeout occurs.

        :arg str info: More detailed timeout information.
        """
        self._timeout = None
        error_message = "Timeout {0}".format(info) if info else "Timeout"
        if self.final_callback is not None:
            self._handle_exception(
                HTTPTimeoutError, HTTPTimeoutError(error_message), None
            )

    def _remove_timeout(self) -> None:
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def _create_connection(self, stream: IOStream) -> HTTP1Connection:
        stream.set_nodelay(True)
        connection = HTTP1Connection(
            stream,
            True,
            HTTP1ConnectionParameters(
                no_keep_alive=True,
                max_header_size=self.max_header_size,
                max_body_size=self.max_body_size,
                decompress=bool(self.request.decompress_response),
            ),
            self._sockaddr,
        )
        return connection

    async def _write_body(self, start_read: bool) -> None:
        if self.request.body is not None:
            self.connection.write(self.request.body)
        elif self.request.body_producer is not None:
            fut = self.request.body_producer(self.connection.write)
            if fut is not None:
                await fut
        self.connection.finish()
        if start_read:
            try:
                await self.connection.read_response(self)
            except StreamClosedError:
                if not self._handle_exception(*sys.exc_info()):
                    raise

    def _release(self) -> None:
        if self.release_callback is not None:
            release_callback = self.release_callback
            self.release_callback = None  # type: ignore
            release_callback()

    def _run_callback(self, response: HTTPResponse) -> None:
        self._release()
        if self.final_callback is not None:
            final_callback = self.final_callback
            self.final_callback = None  # type: ignore
            self.io_loop.add_callback(final_callback, response)

    def _handle_exception(
        self,
        typ: "Optional[Type[BaseException]]",
        value: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> bool:
        if self.final_callback:
            self._remove_timeout()
            if isinstance(value, StreamClosedError):
                if value.real_error is None:
                    value = HTTPStreamClosedError("Stream closed")
                else:
                    value = value.real_error
            self._run_callback(
                HTTPResponse(
                    self.request,
                    599,
                    error=value,
                    request_time=self.io_loop.time() - self.start_time,
                    start_time=self.start_wall_time,
                )
            )

            if hasattr(self, "stream"):
                # TODO: this may cause a StreamClosedError to be raised
                # by the connection's Future. Should we cancel the
                # connection more gracefully?
                self.stream.close()
            return True
        else:
            # If our callback has already been called, we are probably
            # catching an exception that is not caused by us but rather
            # some child of our callback. Rather than drop it on the floor,
            # pass it along, unless it's just the stream being closed.
            return isinstance(value, StreamClosedError)

    def on_connection_close(self) -> None:
        if self.final_callback is not None:
            message = "Connection closed"
            if self.stream.error:
                raise self.stream.error
            try:
                raise HTTPStreamClosedError(message)
            except HTTPStreamClosedError:
                self._handle_exception(*sys.exc_info())

    async def headers_received(
        self,
        first_line: Union[httputil.ResponseStartLine, httputil.RequestStartLine],
        headers: httputil.HTTPHeaders,
    ) -> None:
        assert isinstance(first_line, httputil.ResponseStartLine)
        if self.request.expect_100_continue and first_line.code == 100:
            await self._write_body(False)
            return
        self.code = first_line.code
        self.reason = first_line.reason
        self.headers = headers

        if self._should_follow_redirect():
            return

        if self.request.header_callback is not None:
            # Reassemble the start line.
            self.request.header_callback("%s %s %s\r\n" % first_line)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback("\r\n")

    def _should_follow_redirect(self) -> bool:
        if self.request.follow_redirects:
            assert self.request.max_redirects is not None
            return (
                self.code in (301, 302, 303, 307, 308)
                and self.request.max_redirects > 0
                and self.headers is not None
                and self.headers.get("Location") is not None
            )
        return False

    def finish(self) -> None:
        assert self.code is not None
        data = b"".join(self.chunks)
        self._remove_timeout()
        original_request = getattr(self.request, "original_request", self.request)
        if self._should_follow_redirect():
            assert isinstance(self.request, _RequestProxy)
            new_request = copy.copy(self.request.request)
            new_request.url = urllib.parse.urljoin(
                self.request.url, self.headers["Location"]
            )
            new_request.max_redirects = self.request.max_redirects - 1
            del new_request.headers["Host"]
            # https://tools.ietf.org/html/rfc7231#section-6.4
            #
            # The original HTTP spec said that after a 301 or 302
            # redirect, the request method should be preserved.
            # However, browsers implemented this by changing the
            # method to GET, and the behavior stuck. 303 redirects
            # always specified this POST-to-GET behavior, arguably
            # for *all* methods, but libcurl < 7.70 only does this
            # for POST, while libcurl >= 7.70 does it for other methods.
            if (self.code == 303 and self.request.method != "HEAD") or (
                self.code in (301, 302) and self.request.method == "POST"
            ):
                new_request.method = "GET"
                new_request.body = None
                for h in [
                    "Content-Length",
                    "Content-Type",
                    "Content-Encoding",
                    "Transfer-Encoding",
                ]:
                    try:
                        del self.request.headers[h]
                    except KeyError:
                        pass
            new_request.original_request = original_request
            final_callback = self.final_callback
            self.final_callback = None
            self._release()
            fut = self.client.fetch(new_request, raise_error=False)
            fut.add_done_callback(lambda f: final_callback(f.result()))
            self._on_end_request()
            return
        if self.request.streaming_callback:
            buffer = BytesIO()
        else:
            buffer = BytesIO(data)  # TODO: don't require one big string?
        response = HTTPResponse(
            original_request,
            self.code,
            reason=getattr(self, "reason", None),
            headers=self.headers,
            request_time=self.io_loop.time() - self.start_time,
            start_time=self.start_wall_time,
            buffer=buffer,
            effective_url=self.request.url,
        )
        self._run_callback(response)
        self._on_end_request()

    def _on_end_request(self) -> None:
        self.stream.close()

    def data_received(self, chunk: bytes) -> None:
        if self._should_follow_redirect():
            # We're going to follow a redirect so just discard the body.
            return
        if self.request.streaming_callback is not None:
            self.request.streaming_callback(chunk)
        else:
            self.chunks.append(chunk)


if __name__ == "__main__":
    AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
    main()
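The redirect handling in ``finish`` above mirrors browser behavior: a 303 (or a 301/302 answering a POST) is retried as a bodiless GET with the Content-* headers dropped, and ``max_redirects`` is decremented per hop. A sketch, with a hypothetical redirecting endpoint:

    import asyncio
    from tornado.httpclient import AsyncHTTPClient

    async def demo():
        client = AsyncHTTPClient()
        resp = await client.fetch(
            "http://example.com/submit",  # hypothetical endpoint returning 303
            method="POST",
            body=b"x=1",
            follow_redirects=True,
            max_redirects=3,
        )
        # The final hop was fetched as a GET; effective_url is the last URL.
        print(resp.code, resp.effective_url)

    asyncio.run(demo())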
BIN
venv/Lib/site-packages/tornado/speedups.cp36-win32.pyd
Normal file
Binary file not shown.
328
venv/Lib/site-packages/tornado/tcpclient.py
Normal file
@@ -0,0 +1,328 @@
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A non-blocking TCP connection factory.
"""

import functools
import socket
import numbers
import datetime
import ssl

from tornado.concurrent import Future, future_add_done_callback
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import gen
from tornado.netutil import Resolver
from tornado.gen import TimeoutError

from typing import Any, Union, Dict, Tuple, List, Callable, Iterator, Optional, Set

_INITIAL_CONNECT_TIMEOUT = 0.3


class _Connector(object):
    """A stateless implementation of the "Happy Eyeballs" algorithm.

    "Happy Eyeballs" is documented in RFC6555 as the recommended practice
    for when both IPv4 and IPv6 addresses are available.

    In this implementation, we partition the addresses by family, and
    make the first connection attempt to whichever address was
    returned first by ``getaddrinfo``. If that connection fails or
    times out, we begin a connection in parallel to the first address
    of the other family. If there are additional failures we retry
    with other addresses, keeping one connection attempt per family
    in flight at a time.

    http://tools.ietf.org/html/rfc6555
    """

    def __init__(
        self,
        addrinfo: List[Tuple],
        connect: Callable[
            [socket.AddressFamily, Tuple], Tuple[IOStream, "Future[IOStream]"]
        ],
    ) -> None:
        self.io_loop = IOLoop.current()
        self.connect = connect

        self.future = (
            Future()
        )  # type: Future[Tuple[socket.AddressFamily, Any, IOStream]]
        self.timeout = None  # type: Optional[object]
        self.connect_timeout = None  # type: Optional[object]
        self.last_error = None  # type: Optional[Exception]
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
        self.streams = set()  # type: Set[IOStream]

    @staticmethod
    def split(
        addrinfo: List[Tuple],
    ) -> Tuple[
        List[Tuple[socket.AddressFamily, Tuple]],
        List[Tuple[socket.AddressFamily, Tuple]],
    ]:
        """Partition the ``addrinfo`` list by address family.

        Returns two lists. The first list contains the first entry from
        ``addrinfo`` and all others with the same family, and the
        second list contains all other addresses (normally one list will
        be AF_INET and the other AF_INET6, although non-standard resolvers
        may return additional families).
        """
        primary = []
        secondary = []
        primary_af = addrinfo[0][0]
        for af, addr in addrinfo:
            if af == primary_af:
                primary.append((af, addr))
            else:
                secondary.append((af, addr))
        return primary, secondary

    def start(
        self,
        timeout: float = _INITIAL_CONNECT_TIMEOUT,
        connect_timeout: Optional[Union[float, datetime.timedelta]] = None,
    ) -> "Future[Tuple[socket.AddressFamily, Any, IOStream]]":
        self.try_connect(iter(self.primary_addrs))
        self.set_timeout(timeout)
        if connect_timeout is not None:
            self.set_connect_timeout(connect_timeout)
        return self.future

    def try_connect(self, addrs: Iterator[Tuple[socket.AddressFamily, Tuple]]) -> None:
        try:
            af, addr = next(addrs)
        except StopIteration:
            # We've reached the end of our queue, but the other queue
            # might still be working. Send a final error on the future
            # only when both queues are finished.
            if self.remaining == 0 and not self.future.done():
                self.future.set_exception(
                    self.last_error or IOError("connection failed")
                )
            return
        stream, future = self.connect(af, addr)
        self.streams.add(stream)
        future_add_done_callback(
            future, functools.partial(self.on_connect_done, addrs, af, addr)
        )

    def on_connect_done(
        self,
        addrs: Iterator[Tuple[socket.AddressFamily, Tuple]],
        af: socket.AddressFamily,
        addr: Tuple,
        future: "Future[IOStream]",
    ) -> None:
        self.remaining -= 1
        try:
            stream = future.result()
        except Exception as e:
            if self.future.done():
                return
            # Error: try again (but remember what happened so we have an
            # error to raise in the end)
            self.last_error = e
            self.try_connect(addrs)
            if self.timeout is not None:
                # If the first attempt failed, don't wait for the
                # timeout to try an address from the secondary queue.
                self.io_loop.remove_timeout(self.timeout)
                self.on_timeout()
            return
        self.clear_timeouts()
        if self.future.done():
            # This is a late arrival; just drop it.
            stream.close()
        else:
            self.streams.discard(stream)
            self.future.set_result((af, addr, stream))
            self.close_streams()

    def set_timeout(self, timeout: float) -> None:
        self.timeout = self.io_loop.add_timeout(
            self.io_loop.time() + timeout, self.on_timeout
        )

    def on_timeout(self) -> None:
        self.timeout = None
        if not self.future.done():
            self.try_connect(iter(self.secondary_addrs))

    def clear_timeout(self) -> None:
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)

    def set_connect_timeout(
        self, connect_timeout: Union[float, datetime.timedelta]
    ) -> None:
        self.connect_timeout = self.io_loop.add_timeout(
            connect_timeout, self.on_connect_timeout
        )

    def on_connect_timeout(self) -> None:
        if not self.future.done():
            self.future.set_exception(TimeoutError())
        self.close_streams()

    def clear_timeouts(self) -> None:
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)
        if self.connect_timeout is not None:
            self.io_loop.remove_timeout(self.connect_timeout)

    def close_streams(self) -> None:
        for stream in self.streams:
            stream.close()
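``split`` is a plain static method (on an internal class, note the leading underscore), so the partition it performs is easy to see in isolation. The addresses below are illustrative:

    import socket
    from tornado.tcpclient import _Connector

    addrinfo = [
        (socket.AF_INET, ("93.184.216.34", 80)),
        (socket.AF_INET6, ("2606:2800:220:1:248:1893:25c8:1946", 80)),
        (socket.AF_INET, ("93.184.216.35", 80)),
    ]
    # The first entry fixes the primary family; everything else is secondary.
    primary, secondary = _Connector.split(addrinfo)
    assert [af for af, _ in primary] == [socket.AF_INET, socket.AF_INET]
    assert [af for af, _ in secondary] == [socket.AF_INET6]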
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """

    def __init__(self, resolver: Optional[Resolver] = None) -> None:
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver()
            self._own_resolver = True

    def close(self) -> None:
        if self._own_resolver:
            self.resolver.close()

    async def connect(
        self,
        host: str,
        port: int,
        af: socket.AddressFamily = socket.AF_UNSPEC,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
        max_buffer_size: Optional[int] = None,
        source_ip: Optional[str] = None,
        source_port: Optional[int] = None,
        timeout: Optional[Union[float, datetime.timedelta]] = None,
    ) -> IOStream:
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the connection attempt does not complete
        before ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`).

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timeout.total_seconds()
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = await gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af)
            )
        else:
            addrinfo = await self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(
                self._create_stream,
                max_buffer_size,
                source_ip=source_ip,
                source_port=source_port,
            ),
        )
        af, addr, stream = await connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = await gen.with_timeout(
                    timeout,
                    stream.start_tls(
                        False, ssl_options=ssl_options, server_hostname=host
                    ),
                )
            else:
                stream = await stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host
                )
        return stream

    def _create_stream(
        self,
        max_buffer_size: int,
        af: socket.AddressFamily,
        addr: Tuple,
        source_ip: Optional[str] = None,
        source_port: Optional[int] = None,
    ) -> Tuple[IOStream, "Future[IOStream]"]:
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = "::1" if af == socket.AF_INET6 else "127.0.0.1"
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                socket_obj.close()
                # Fail loudly if unable to use the IP/port.
                raise
        try:
            stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()  # type: Future[IOStream]
            fu.set_exception(e)
            return stream, fu
        else:
            return stream, stream.connect(addr)
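A sketch of ``TCPClient.connect``; as the docstring notes, ``timeout`` may be a number of seconds or a ``datetime.timedelta``. The host and request line are placeholders:

    import asyncio
    from tornado.tcpclient import TCPClient

    async def demo():
        client = TCPClient()
        # Resolution, Happy-Eyeballs connection racing, and (optionally)
        # TLS setup all happen inside connect().
        stream = await client.connect("example.com", 80, timeout=5)
        await stream.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(await stream.read_until_close())
        client.close()

    asyncio.run(demo())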
334
venv/Lib/site-packages/tornado/tcpserver.py
Normal file
@@ -0,0 +1,334 @@
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A non-blocking, single-threaded TCP server."""

import errno
import os
import socket
import ssl

from tornado import gen
from tornado.log import app_log
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
from tornado import process
from tornado.util import errno_from_exception

import typing
from typing import Union, Dict, Any, Iterable, Optional, Awaitable

if typing.TYPE_CHECKING:
    from typing import Callable, List  # noqa: F401


class TCPServer(object):
    r"""A non-blocking, single-threaded TCP server.

    To use `TCPServer`, define a subclass which overrides the `handle_stream`
    method. For example, a simple echo server could be defined like this::

        from tornado.tcpserver import TCPServer
        from tornado.iostream import StreamClosedError
        from tornado import gen

        class EchoServer(TCPServer):
            async def handle_stream(self, stream, address):
                while True:
                    try:
                        data = await stream.read_until(b"\n")
                        await stream.write(data)
                    except StreamClosedError:
                        break

    To make this server serve SSL traffic, send the ``ssl_options`` keyword
    argument with an `ssl.SSLContext` object. For compatibility with older
    versions of Python ``ssl_options`` may also be a dictionary of keyword
    arguments for the `ssl.wrap_socket` method.::

        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
                                os.path.join(data_dir, "mydomain.key"))
        TCPServer(ssl_options=ssl_ctx)

    `TCPServer` initialization follows one of three patterns:

    1. `listen`: simple single-process::

            server = TCPServer()
            server.listen(8888)
            IOLoop.current().start()

    2. `bind`/`start`: simple multi-process::

            server = TCPServer()
            server.bind(8888)
            server.start(0)  # Forks multiple sub-processes
            IOLoop.current().start()

       When using this interface, an `.IOLoop` must *not* be passed
       to the `TCPServer` constructor. `start` will always start
       the server on the default singleton `.IOLoop`.

    3. `add_sockets`: advanced multi-process::

            sockets = bind_sockets(8888)
            tornado.process.fork_processes(0)
            server = TCPServer()
            server.add_sockets(sockets)
            IOLoop.current().start()

       The `add_sockets` interface is more complicated, but it can be
       used with `tornado.process.fork_processes` to give you more
       flexibility in when the fork happens. `add_sockets` can
       also be used in single-process servers if you want to create
       your listening sockets in some way other than
       `~tornado.netutil.bind_sockets`.

    .. versionadded:: 3.1
       The ``max_buffer_size`` argument.

    .. versionchanged:: 5.0
       The ``io_loop`` argument has been removed.
    """

    def __init__(
        self,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
        max_buffer_size: Optional[int] = None,
        read_chunk_size: Optional[int] = None,
    ) -> None:
        self.ssl_options = ssl_options
        self._sockets = {}  # type: Dict[int, socket.socket]
        self._handlers = {}  # type: Dict[int, Callable[[], None]]
        self._pending_sockets = []  # type: List[socket.socket]
        self._started = False
        self._stopped = False
        self.max_buffer_size = max_buffer_size
        self.read_chunk_size = read_chunk_size

        # Verify the SSL options. Otherwise we don't get errors until clients
        # connect. This doesn't verify that the keys are legitimate, but
        # the SSL module doesn't do that until there is a connected socket
        # which seems like too much work
        if self.ssl_options is not None and isinstance(self.ssl_options, dict):
            # Only certfile is required: it can contain both keys
            if "certfile" not in self.ssl_options:
                raise KeyError('missing key "certfile" in ssl_options')

            if not os.path.exists(self.ssl_options["certfile"]):
                raise ValueError(
                    'certfile "%s" does not exist' % self.ssl_options["certfile"]
                )
            if "keyfile" in self.ssl_options and not os.path.exists(
                self.ssl_options["keyfile"]
            ):
                raise ValueError(
                    'keyfile "%s" does not exist' % self.ssl_options["keyfile"]
                )

    def listen(self, port: int, address: str = "") -> None:
        """Starts accepting connections on the given port.

        This method may be called more than once to listen on multiple ports.
        `listen` takes effect immediately; it is not necessary to call
        `TCPServer.start` afterwards. It is, however, necessary to start
        the `.IOLoop`.
        """
        sockets = bind_sockets(port, address=address)
        self.add_sockets(sockets)

    def add_sockets(self, sockets: Iterable[socket.socket]) -> None:
        """Makes this server start accepting connections on the given sockets.

        The ``sockets`` parameter is a list of socket objects such as
        those returned by `~tornado.netutil.bind_sockets`.
        `add_sockets` is typically used in combination with that
        method and `tornado.process.fork_processes` to provide greater
        control over the initialization of a multi-process server.
        """
        for sock in sockets:
            self._sockets[sock.fileno()] = sock
            self._handlers[sock.fileno()] = add_accept_handler(
                sock, self._handle_connection
            )

    def add_socket(self, socket: socket.socket) -> None:
        """Singular version of `add_sockets`. Takes a single socket object."""
        self.add_sockets([socket])

    def bind(
        self,
        port: int,
        address: Optional[str] = None,
        family: socket.AddressFamily = socket.AF_UNSPEC,
        backlog: int = 128,
        reuse_port: bool = False,
    ) -> None:
        """Binds this server to the given port on the given address.

        To start the server, call `start`. If you want to run this server
        in a single process, you can call `listen` as a shortcut to the
        sequence of `bind` and `start` calls.

        Address may be either an IP address or hostname. If it's a hostname,
        the server will listen on all IP addresses associated with the
        name. Address may be an empty string or None to listen on all
        available interfaces. Family may be set to either `socket.AF_INET`
        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
        both will be used if available.

        The ``backlog`` argument has the same meaning as for
        `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument
        has the same meaning as for `.bind_sockets`.

        This method may be called multiple times prior to `start` to listen
        on multiple ports or interfaces.

        .. versionchanged:: 4.4
           Added the ``reuse_port`` argument.
        """
        sockets = bind_sockets(
            port, address=address, family=family, backlog=backlog, reuse_port=reuse_port
        )
        if self._started:
            self.add_sockets(sockets)
        else:
            self._pending_sockets.extend(sockets)

    def start(
        self, num_processes: Optional[int] = 1, max_restarts: Optional[int] = None
    ) -> None:
        """Starts this server in the `.IOLoop`.

        By default, we run the server in this process and do not fork any
        additional child process.

        If num_processes is ``None`` or <= 0, we detect the number of cores
        available on this machine and fork that number of child
        processes. If num_processes is given and > 1, we fork that
        specific number of sub-processes.

        Since we use processes and not threads, there is no shared memory
        between any server code.

        Note that multiple processes are not compatible with the autoreload
        module (or the ``autoreload=True`` option to `tornado.web.Application`
        which defaults to True when ``debug=True``).
        When using multiple processes, no IOLoops can be created or
        referenced until after the call to ``TCPServer.start(n)``.

        Values of ``num_processes`` other than 1 are not supported on Windows.

        The ``max_restarts`` argument is passed to `.fork_processes`.

        .. versionchanged:: 6.0

           Added ``max_restarts`` argument.
        """
        assert not self._started
        self._started = True
        if num_processes != 1:
            process.fork_processes(num_processes, max_restarts)
        sockets = self._pending_sockets
        self._pending_sockets = []
        self.add_sockets(sockets)

    def stop(self) -> None:
        """Stops listening for new connections.

        Requests currently in progress may still continue after the
        server is stopped.
        """
        if self._stopped:
            return
        self._stopped = True
        for fd, sock in self._sockets.items():
            assert sock.fileno() == fd
            # Unregister socket from IOLoop
            self._handlers.pop(fd)()
            sock.close()

    def handle_stream(
        self, stream: IOStream, address: tuple
    ) -> Optional[Awaitable[None]]:
        """Override to handle a new `.IOStream` from an incoming connection.

        This method may be a coroutine; if so any exceptions it raises
        asynchronously will be logged. Accepting of incoming connections
        will not be blocked by this coroutine.

        If this `TCPServer` is configured for SSL, ``handle_stream``
        may be called before the SSL handshake has completed. Use
        `.SSLIOStream.wait_for_handshake` if you need to verify the client's
        certificate or use NPN/ALPN.

        .. versionchanged:: 4.2
           Added the option for this method to be a coroutine.
        """
        raise NotImplementedError()

    def _handle_connection(self, connection: socket.socket, address: Any) -> None:
        if self.ssl_options is not None:
            assert ssl, "Python 2.6+ and OpenSSL required for SSL"
            try:
                connection = ssl_wrap_socket(
                    connection,
                    self.ssl_options,
                    server_side=True,
                    do_handshake_on_connect=False,
                )
            except ssl.SSLError as err:
                if err.args[0] == ssl.SSL_ERROR_EOF:
                    return connection.close()
                else:
                    raise
            except socket.error as err:
                # If the connection is closed immediately after it is created
                # (as in a port scan), we can get one of several errors.
                # wrap_socket makes an internal call to getpeername,
                # which may return either EINVAL (Mac OS X) or ENOTCONN
                # (Linux). If it returns ENOTCONN, this error is
                # silently swallowed by the ssl module, so we need to
                # catch another error later on (AttributeError in
                # SSLIOStream._do_ssl_handshake).
                # To test this behavior, try nmap with the -sT flag.
                # https://github.com/tornadoweb/tornado/pull/750
                if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
                    return connection.close()
                else:
                    raise
        try:
            if self.ssl_options is not None:
                stream = SSLIOStream(
                    connection,
                    max_buffer_size=self.max_buffer_size,
                    read_chunk_size=self.read_chunk_size,
                )  # type: IOStream
            else:
                stream = IOStream(
                    connection,
                    max_buffer_size=self.max_buffer_size,
                    read_chunk_size=self.read_chunk_size,
                )

            future = self.handle_stream(stream, address)
            if future is not None:
                IOLoop.current().add_future(
                    gen.convert_yielded(future), lambda f: f.result()
                )
        except Exception:
            app_log.error("Error in connection callback", exc_info=True)
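A sketch pairing the docstring's echo server with ``TCPClient`` from the previous file; the port is chosen arbitrarily:

    import asyncio
    from tornado.iostream import StreamClosedError
    from tornado.tcpclient import TCPClient
    from tornado.tcpserver import TCPServer

    class EchoServer(TCPServer):
        async def handle_stream(self, stream, address):
            while True:
                try:
                    data = await stream.read_until(b"\n")
                    await stream.write(data)
                except StreamClosedError:
                    break

    async def demo():
        server = EchoServer()
        server.listen(8888)  # single-process pattern 1 from the docstring
        stream = await TCPClient().connect("127.0.0.1", 8888)
        await stream.write(b"hello\n")
        print(await stream.read_until(b"\n"))  # b"hello\n"
        server.stop()

    asyncio.run(demo())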
1048
venv/Lib/site-packages/tornado/template.py
Normal file
File diff suppressed because it is too large
12
venv/Lib/site-packages/tornado/test/__main__.py
Normal file
@@ -0,0 +1,12 @@
"""Shim to allow python -m tornado.test.
|
||||
|
||||
This only works in python 2.7+.
|
||||
"""
|
||||
from tornado.test.runtests import all, main
|
||||
|
||||
# tornado.testing.main autodiscovery relies on 'all' being present in
|
||||
# the main module, so import it here even though it is not used directly.
|
||||
# The following line prevents a pyflakes warning.
|
||||
all = all
|
||||
|
||||
main()
|
Binary file not shown.
Some files were not shown because too many files have changed in this diff