Uploaded Test files

Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions

View file

@@ -0,0 +1,5 @@
"""Tornado eventloop integration for pyzmq"""
from zmq.eventloop.ioloop import IOLoop
__all__ = ['IOLoop']

View file

@@ -0,0 +1,214 @@
# coding: utf-8
"""tornado IOLoop API with zmq compatibility
If you have tornado 3.0, this is a subclass of tornado's IOLoop,
otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.
The minimal shipped version of tornado's IOLoop does not include
support for concurrent futures - this will only be available if you
have tornado 3.0.
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, division, with_statement
import os
import time
import warnings
from zmq import (
Poller,
POLLIN, POLLOUT, POLLERR,
ZMQError, ETERM,
)
try:
import tornado
tornado_version = tornado.version_info
except (ImportError, AttributeError):
tornado_version = ()
from .minitornado.ioloop import PollIOLoop, PeriodicCallback
from .minitornado.log import gen_log
class DelayedCallback(PeriodicCallback):
"""Schedules the given callback to be called once.
The callback is called once, after callback_time milliseconds.
`start` must be called after the DelayedCallback is created.
The timeout is calculated from when `start` is called.
"""
def __init__(self, callback, callback_time, io_loop=None):
        # PeriodicCallback requires callback_time to be positive
warnings.warn("""DelayedCallback is deprecated.
Use loop.add_timeout instead.""", DeprecationWarning)
callback_time = max(callback_time, 1e-3)
super(DelayedCallback, self).__init__(callback, callback_time, io_loop)
def start(self):
"""Starts the timer."""
self._running = True
self._firstrun = True
self._next_timeout = time.time() + self.callback_time / 1000.0
self.io_loop.add_timeout(self._next_timeout, self._run)
def _run(self):
if not self._running: return
self._running = False
try:
self.callback()
except Exception:
gen_log.error("Error in delayed callback", exc_info=True)
class ZMQPoller(object):
"""A poller that can be used in the tornado IOLoop.
    This simply wraps a regular zmq.Poller, scaling the timeout
    by 1000 so that the interface takes seconds (as tornado expects)
    rather than zmq's milliseconds.
"""
def __init__(self):
self._poller = Poller()
@staticmethod
def _map_events(events):
"""translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR"""
z_events = 0
if events & IOLoop.READ:
z_events |= POLLIN
if events & IOLoop.WRITE:
z_events |= POLLOUT
if events & IOLoop.ERROR:
z_events |= POLLERR
return z_events
@staticmethod
def _remap_events(z_events):
"""translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR"""
events = 0
if z_events & POLLIN:
events |= IOLoop.READ
if z_events & POLLOUT:
events |= IOLoop.WRITE
if z_events & POLLERR:
events |= IOLoop.ERROR
return events
def register(self, fd, events):
return self._poller.register(fd, self._map_events(events))
def modify(self, fd, events):
return self._poller.modify(fd, self._map_events(events))
def unregister(self, fd):
return self._poller.unregister(fd)
def poll(self, timeout):
"""poll in seconds rather than milliseconds.
Event masks will be IOLoop.READ/WRITE/ERROR
"""
z_events = self._poller.poll(1000*timeout)
return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ]
def close(self):
pass
class ZMQIOLoop(PollIOLoop):
"""ZMQ subclass of tornado's IOLoop
Minor modifications, so that .current/.instance return self
"""
_zmq_impl = ZMQPoller
def initialize(self, impl=None, **kwargs):
impl = self._zmq_impl() if impl is None else impl
super(ZMQIOLoop, self).initialize(impl=impl, **kwargs)
@classmethod
def instance(cls, *args, **kwargs):
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
# install ZMQIOLoop as the active IOLoop implementation
# when using tornado 3
if tornado_version >= (3,):
PollIOLoop.configure(cls)
loop = PollIOLoop.instance(*args, **kwargs)
if not isinstance(loop, cls):
warnings.warn("IOLoop.current expected instance of %r, got %r" % (cls, loop),
RuntimeWarning, stacklevel=2,
)
return loop
@classmethod
def current(cls, *args, **kwargs):
"""Returns the current threads IOLoop.
"""
# install ZMQIOLoop as the active IOLoop implementation
# when using tornado 3
if tornado_version >= (3,):
PollIOLoop.configure(cls)
loop = PollIOLoop.current(*args, **kwargs)
if not isinstance(loop, cls):
warnings.warn("IOLoop.current expected instance of %r, got %r" % (cls, loop),
RuntimeWarning, stacklevel=2,
)
return loop
def start(self):
try:
super(ZMQIOLoop, self).start()
except ZMQError as e:
if e.errno == ETERM:
# quietly return on ETERM
pass
else:
raise
if (3, 0) <= tornado_version < (3, 1):
def backport_close(self, all_fds=False):
"""backport IOLoop.close to 3.0 from 3.1 (supports fd.close() method)"""
from zmq.eventloop.minitornado.ioloop import PollIOLoop as mini_loop
return mini_loop.close.__get__(self)(all_fds)
ZMQIOLoop.close = backport_close
# public API name
IOLoop = ZMQIOLoop
def install():
"""set the tornado IOLoop instance with the pyzmq IOLoop.
After calling this function, tornado's IOLoop.instance() and pyzmq's
IOLoop.instance() will return the same object.
An assertion error will be raised if tornado's IOLoop has been initialized
prior to calling this function.
"""
from tornado import ioloop
# check if tornado's IOLoop is already initialized to something other
# than the pyzmq IOLoop instance:
assert (not ioloop.IOLoop.initialized()) or \
ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized"
if tornado_version >= (3,):
# tornado 3 has an official API for registering new defaults, yay!
ioloop.IOLoop.configure(ZMQIOLoop)
else:
# we have to set the global instance explicitly
ioloop.IOLoop._instance = IOLoop.instance()
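
# Illustrative usage sketch, not part of the original module: wiring
# tornado to the pyzmq IOLoop via install(). Assumes tornado >= 3 is
# importable; per install()'s documented contract, both instance()
# calls should then return the same loop object.
if __name__ == "__main__":
    install()  # must run before tornado initializes its own IOLoop
    from tornado import ioloop as tornado_ioloop
    loop = IOLoop.instance()
    assert loop is tornado_ioloop.IOLoop.instance()
    print("tornado and pyzmq now share the IOLoop: %r" % loop)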

View file

@@ -0,0 +1,73 @@
"""Future-returning APIs for tornado coroutines.
.. seealso::
:mod:`zmq.asyncio`
"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import zmq as _zmq
from zmq._future import _AsyncPoller, _AsyncSocket
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
class CancelledError(Exception):
pass
class _TornadoFuture(Future):
"""Subclass Tornado Future, reinstating cancellation."""
def cancel(self):
if self.done():
return False
self.set_exception(CancelledError())
return True
def cancelled(self):
return self.done() and isinstance(self.exception(), CancelledError)
# mixin for tornado/asyncio compatibility
class _AsyncTornado(object):
_Future = _TornadoFuture
_READ = IOLoop.READ
_WRITE = IOLoop.WRITE
def _default_loop(self):
return IOLoop.current()
class Poller(_AsyncTornado, _AsyncPoller):
def _watch_raw_socket(self, loop, socket, evt, f):
"""Schedule callback for a raw socket"""
loop.add_handler(socket, lambda *args: f(), evt)
def _unwatch_raw_sockets(self, loop, *sockets):
"""Unschedule callback for a raw socket"""
for socket in sockets:
loop.remove_handler(socket)
class Socket(_AsyncTornado, _AsyncSocket):
_poller_class = Poller
Poller._socket_class = Socket
class Context(_zmq.Context):
# avoid sharing instance with base Context class
_instance = None
io_loop = None
    # Written as a @staticmethod that still takes ``self``: the base
    # Context calls ``self._socket_class(self, socket_type)`` expecting a
    # class, so the staticmethod keeps the function unbound and the
    # explicit ``self`` argument lines up with that call, while letting
    # us pass io_loop to the Socket.
    @staticmethod
def _socket_class(self, socket_type):
return Socket(self, socket_type, io_loop=self.io_loop)
def __init__(self, *args, **kwargs):
io_loop = kwargs.pop('io_loop', None)
super(Context, self).__init__(*args, **kwargs)
self.io_loop = io_loop or IOLoop.current()
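
# Illustrative sketch, not part of the original module: awaiting the
# future-returning send/recv from a tornado coroutine. The REQ socket
# and the address "tcp://127.0.0.1:5555" are placeholders; a REP peer
# is assumed to be bound there.
if __name__ == "__main__":
    from tornado import gen

    @gen.coroutine
    def ping():
        sock = Context.instance().socket(_zmq.REQ)
        sock.connect("tcp://127.0.0.1:5555")
        yield sock.send(b"ping")   # send() returns a Future
        reply = yield sock.recv()  # resolves when a message arrives
        raise gen.Return(reply)

    print(IOLoop.current().run_sync(ping))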

View file

@@ -0,0 +1,136 @@
# coding: utf-8
"""tornado IOLoop API with zmq compatibility
This module is deprecated in pyzmq 17.
To use zmq with tornado,
eventloop integration is no longer required
and tornado itself should be used.
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, division, with_statement
import time
import warnings
try:
import tornado
from tornado.log import gen_log
from tornado import ioloop
if not hasattr(ioloop.IOLoop, 'configurable_default'):
raise ImportError("Tornado too old: %s" % getattr(tornado, 'version', 'unknown'))
except ImportError:
from .minitornado import ioloop
from .minitornado.log import gen_log
PeriodicCallback = ioloop.PeriodicCallback
class DelayedCallback(PeriodicCallback):
"""Schedules the given callback to be called once.
The callback is called once, after callback_time milliseconds.
`start` must be called after the DelayedCallback is created.
The timeout is calculated from when `start` is called.
"""
def __init__(self, callback, callback_time, io_loop=None):
        # PeriodicCallback requires callback_time to be positive
warnings.warn("""DelayedCallback is deprecated.
Use loop.add_timeout instead.""", DeprecationWarning)
callback_time = max(callback_time, 1e-3)
super(DelayedCallback, self).__init__(callback, callback_time, io_loop)
def start(self):
"""Starts the timer."""
self._running = True
self._firstrun = True
self._next_timeout = time.time() + self.callback_time / 1000.0
self.io_loop.add_timeout(self._next_timeout, self._run)
def _run(self):
if not self._running: return
self._running = False
try:
self.callback()
except Exception:
gen_log.error("Error in delayed callback", exc_info=True)
def _deprecated():
if _deprecated.called:
return
_deprecated.called = True
warnings.warn("zmq.eventloop.ioloop is deprecated in pyzmq 17."
" pyzmq now works with default tornado and asyncio eventloops.",
DeprecationWarning, stacklevel=3)
_deprecated.called = False
# resolve 'true' default loop
if '.minitornado.' in ioloop.__name__:
from ._deprecated import ZMQIOLoop as _IOLoop
else:
_IOLoop = ioloop.IOLoop
while _IOLoop.configurable_default() is not _IOLoop:
_IOLoop = _IOLoop.configurable_default()
class ZMQIOLoop(_IOLoop):
"""DEPRECATED: No longer needed as of pyzmq-17
PyZMQ tornado integration now works with the default :mod:`tornado.ioloop.IOLoop`.
"""
def __init__(self, *args, **kwargs):
_deprecated()
# super is object, which takes no args
return super(ZMQIOLoop, self).__init__()
@classmethod
def instance(cls, *args, **kwargs):
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
# install ZMQIOLoop as the active IOLoop implementation
# when using tornado 3
ioloop.IOLoop.configure(cls)
_deprecated()
loop = ioloop.IOLoop.instance(*args, **kwargs)
return loop
@classmethod
def current(cls, *args, **kwargs):
"""Returns the current threads IOLoop.
"""
# install ZMQIOLoop as the active IOLoop implementation
# when using tornado 3
ioloop.IOLoop.configure(cls)
_deprecated()
loop = ioloop.IOLoop.current(*args, **kwargs)
return loop
# public API name
IOLoop = ZMQIOLoop
def install():
"""DEPRECATED
pyzmq 17 no longer needs any special integration for tornado.
"""
_deprecated()
ioloop.IOLoop.configure(ZMQIOLoop)
# if minitornado is used, fall back on the deprecated ZMQIOLoop and install implementations
if '.minitornado.' in ioloop.__name__:
from ._deprecated import ZMQIOLoop, install, IOLoop
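
# Illustrative sketch, not part of the original module: as the docstring
# above says, pyzmq 17 needs no install() call; a ZMQStream registers
# directly on the current loop. The PULL socket and address are
# placeholders.
if __name__ == "__main__":
    import zmq
    from zmq.eventloop.zmqstream import ZMQStream

    sock = zmq.Context.instance().socket(zmq.PULL)
    sock.bind("tcp://127.0.0.1:5556")
    stream = ZMQStream(sock)  # picks up IOLoop.current() automatically

    def show(msg):
        print("received: %s" % msg)

    stream.on_recv(show)
    ioloop.IOLoop.current().start()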

View file

@@ -0,0 +1,11 @@
import warnings
class VisibleDeprecationWarning(UserWarning):
"""A DeprecationWarning that users should see."""
pass
warnings.warn("""zmq.eventloop.minitornado is deprecated in pyzmq 14.0 and will be removed.
Install tornado itself to use zmq with the tornado IOLoop.
""",
VisibleDeprecationWarning,
stacklevel=4,
)

View file

@@ -0,0 +1,14 @@
"""pyzmq does not ship tornado's futures,
this just raises informative NotImplementedErrors to avoid having to change too much code.
"""
class NotImplementedFuture(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError("pyzmq does not ship tornado's Futures, "
"install tornado >= 3.0 for future support."
)
Future = TracebackFuture = NotImplementedFuture
def is_future(x):
return isinstance(x, Future)

File diff suppressed because it is too large

View file

@@ -0,0 +1,6 @@
"""minimal subset of tornado.log for zmq.eventloop.minitornado"""
import logging
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")

View file

@@ -0,0 +1,45 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of platform-specific functionality.
For each function or class described in `tornado.platform.interface`,
the appropriate platform-specific implementation exists in this module.
Most code that needs access to this functionality should do e.g.::
from tornado.platform.auto import set_close_exec
"""
from __future__ import absolute_import, division, print_function, with_statement
import os
if os.name == 'nt':
from .common import Waker
from .windows import set_close_exec
else:
from .posix import set_close_exec, Waker
try:
# monotime monkey-patches the time module to have a monotonic function
# in versions of python before 3.3.
import monotime
except ImportError:
pass
try:
from time import monotonic as monotonic_time
except ImportError:
monotonic_time = None
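
# Illustrative sketch, not part of the original module: a plausible way
# for loop code to consume the optional monotonic clock resolved above,
# falling back to wall-clock time when no monotonic source exists.
import time as _time

def _now():
    # time.time() can jump when the system clock is adjusted; prefer the
    # monotonic clock whenever one was found above.
    if monotonic_time is not None:
        return monotonic_time()
    return _time.time()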

View file

@@ -0,0 +1,91 @@
"""Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from . import interface
class Waker(interface.Waker):
"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""
def __init__(self):
# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
try:
self.writer.connect(connect_address)
break # success
            except socket.error as detail:
                # On Python 3, socket.error is OSError, which is not
                # subscriptable; read .errno instead of detail[0].
                if (not hasattr(errno, 'WSAEADDRINUSE') or
                        detail.errno != errno.WSAEADDRINUSE):
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.send(b"x")
except (IOError, socket.error):
pass
def consume(self):
try:
while True:
result = self.reader.recv(1024)
if not result:
break
except (IOError, socket.error):
pass
def close(self):
self.reader.close()
self.writer.close()

View file

@@ -0,0 +1,63 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interfaces for platform-specific functionality.
This module exists primarily for documentation purposes and as base classes
for other tornado.platform modules. Most code should import the appropriate
implementation from `tornado.platform.auto`.
"""
from __future__ import absolute_import, division, print_function, with_statement
def set_close_exec(fd):
"""Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor."""
raise NotImplementedError()
class Waker(object):
"""A socket-like object that can wake another thread from ``select()``.
The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
thread wants to wake up the loop, it calls `wake`. Once it has woken
up, it will call `consume` to do any necessary per-wake cleanup. When
the ``IOLoop`` is closed, it closes its waker too.
"""
def fileno(self):
"""Returns the read file descriptor for this waker.
Must be suitable for use with ``select()`` or equivalent on the
local platform.
"""
raise NotImplementedError()
def write_fileno(self):
"""Returns the write file descriptor for this waker."""
raise NotImplementedError()
def wake(self):
"""Triggers activity on the waker's file descriptor."""
raise NotImplementedError()
def consume(self):
"""Called after the listen has woken up to do any necessary cleanup."""
raise NotImplementedError()
def close(self):
"""Closes the waker's file descriptor(s)."""
raise NotImplementedError()

View file

@@ -0,0 +1,70 @@
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Posix implementations of platform-specific functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import fcntl
import os
from . import interface
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class Waker(interface.Waker):
def __init__(self):
r, w = os.pipe()
_set_nonblocking(r)
_set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self.reader = os.fdopen(r, "rb", 0)
self.writer = os.fdopen(w, "wb", 0)
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.write(b"x")
except IOError:
pass
def consume(self):
try:
while True:
result = self.reader.read()
if not result:
break
except IOError:
pass
def close(self):
self.reader.close()
self.writer.close()
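
# Illustrative sketch, not part of the original module: waking a blocked
# select() call from another thread with the Waker above.
if __name__ == "__main__":
    import select
    import threading

    waker = Waker()
    threading.Timer(0.5, waker.wake).start()  # fires from another thread
    readable, _, _ = select.select([waker.fileno()], [], [])  # ~0.5s block
    waker.consume()  # drain the pipe so a later select() blocks again
    waker.close()
    print("select returned: %r" % (readable,))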

View file

@@ -0,0 +1,20 @@
# NOTE: win32 support is currently experimental, and not recommended
# for production use.
from __future__ import absolute_import, division, print_function, with_statement
import ctypes
import ctypes.wintypes
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
SetHandleInformation.restype = ctypes.wintypes.BOOL
HANDLE_FLAG_INHERIT = 0x00000001
def set_close_exec(fd):
success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
if not success:
        # GetLastError() returns an int, which cannot be raised on
        # Python 3; ctypes.WinError() wraps it in a proper OSError.
        raise ctypes.WinError()

View file

@@ -0,0 +1,388 @@
#!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""`StackContext` allows applications to maintain threadlocal-like state
that follows execution as it moves to other execution contexts.
The motivating examples are to eliminate the need for explicit
``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
allow some additional context to be kept for logging.
This is slightly magic, but it's an extension of the idea that an
exception handler is a kind of stack-local state and when that stack
is suspended and resumed in a new context that state needs to be
preserved. `StackContext` shifts the burden of restoring that state
from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
in ``async_callback``) to the mechanisms that transfer control from
one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
thread pools, etc).
Example usage::
@contextlib.contextmanager
def die_on_error():
try:
yield
except Exception:
logging.error("exception in asynchronous operation",exc_info=True)
sys.exit(1)
with StackContext(die_on_error):
# Any exception thrown here *or in callback and its descendants*
# will cause the process to exit instead of spinning endlessly
# in the ioloop.
http_client.fetch(url, callback)
ioloop.start()
Most applications shouldn't have to work with `StackContext` directly.
Here are a few rules of thumb for when it's necessary:
* If you're writing an asynchronous library that doesn't rely on a
stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
(for example, if you're writing a thread pool), use
`.stack_context.wrap()` before any asynchronous operations to capture the
stack context from where the operation was started.
* If you're writing an asynchronous library that has some shared
resources (such as a connection pool), create those shared resources
within a ``with stack_context.NullContext():`` block. This will prevent
``StackContexts`` from leaking from one request to another.
* If you want to write something like an exception handler that will
persist across asynchronous calls, create a new `StackContext` (or
`ExceptionStackContext`), and make your asynchronous calls in a ``with``
block that references your `StackContext`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
import threading
from .util import raise_exc_info
class StackContextInconsistentError(Exception):
pass
class _State(threading.local):
def __init__(self):
self.contexts = (tuple(), None)
_state = _State()
class StackContext(object):
"""Establishes the given context as a StackContext that will be transferred.
Note that the parameter is a callable that returns a context
manager, not the context itself. That is, where for a
non-transferable context manager you would say::
with my_context():
StackContext takes the function itself rather than its result::
with StackContext(my_context):
The result of ``with StackContext() as cb:`` is a deactivation
callback. Run this callback when the StackContext is no longer
needed to ensure that it is not propagated any further (note that
deactivating a context does not affect any instances of that
context that are currently pending). This is an advanced feature
and not necessary in most applications.
"""
def __init__(self, context_factory):
self.context_factory = context_factory
self.contexts = []
self.active = True
def _deactivate(self):
self.active = False
# StackContext protocol
def enter(self):
context = self.context_factory()
self.contexts.append(context)
context.__enter__()
def exit(self, type, value, traceback):
context = self.contexts.pop()
context.__exit__(type, value, traceback)
# Note that some of this code is duplicated in ExceptionStackContext
# below. ExceptionStackContext is more common and doesn't need
# the full generality of this class.
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0] + (self,), self)
_state.contexts = self.new_contexts
try:
self.enter()
except:
_state.contexts = self.old_contexts
raise
return self._deactivate
def __exit__(self, type, value, traceback):
try:
self.exit(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
# Generator coroutines and with-statements with non-local
# effects interact badly. Check here for signs of
# the stack getting out of sync.
# Note that this check comes after restoring _state.context
# so that if it fails things are left in a (relatively)
# consistent state.
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
class ExceptionStackContext(object):
"""Specialization of StackContext for exception handling.
The supplied ``exception_handler`` function will be called in the
event of an uncaught exception in this context. The semantics are
similar to a try/finally clause, and intended use cases are to log
an error, close a socket, or similar cleanup actions. The
``exc_info`` triple ``(type, value, traceback)`` will be passed to the
exception_handler function.
If the exception handler returns true, the exception will be
consumed and will not be propagated to other exception handlers.
"""
def __init__(self, exception_handler):
self.exception_handler = exception_handler
self.active = True
def _deactivate(self):
self.active = False
def exit(self, type, value, traceback):
if type is not None:
return self.exception_handler(type, value, traceback)
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0], self)
_state.contexts = self.new_contexts
return self._deactivate
def __exit__(self, type, value, traceback):
try:
if type is not None:
return self.exception_handler(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
class NullContext(object):
"""Resets the `StackContext`.
Useful when creating a shared resource on demand (e.g. an
    `.AsyncHTTPClient`) where the stack that caused the creation is
not relevant to future operations.
"""
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = (tuple(), None)
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
def _remove_deactivated(contexts):
"""Remove deactivated handlers from the chain"""
# Clean ctx handlers
stack_contexts = tuple([h for h in contexts[0] if h.active])
# Find new head
head = contexts[1]
while head is not None and not head.active:
head = head.old_contexts[1]
# Process chain
ctx = head
while ctx is not None:
parent = ctx.old_contexts[1]
while parent is not None:
if parent.active:
break
ctx.old_contexts = parent.old_contexts
parent = parent.old_contexts[1]
ctx = parent
return (stack_contexts, head)
def wrap(fn):
"""Returns a callable object that will restore the current `StackContext`
when executed.
Use this whenever saving a callback to be executed later in a
different execution context (either in a different thread or
asynchronously in the same thread).
"""
# Check if function is already wrapped
if fn is None or hasattr(fn, '_wrapped'):
return fn
# Capture current stack head
# TODO: Any other better way to store contexts and update them in wrapped function?
cap_contexts = [_state.contexts]
if not cap_contexts[0][0] and not cap_contexts[0][1]:
# Fast path when there are no active contexts.
def null_wrapper(*args, **kwargs):
try:
current_state = _state.contexts
_state.contexts = cap_contexts[0]
return fn(*args, **kwargs)
finally:
_state.contexts = current_state
null_wrapper._wrapped = True
return null_wrapper
def wrapped(*args, **kwargs):
ret = None
try:
# Capture old state
current_state = _state.contexts
# Remove deactivated items
cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
# Force new state
_state.contexts = contexts
# Current exception
exc = (None, None, None)
top = None
# Apply stack contexts
last_ctx = 0
stack = contexts[0]
# Apply state
for n in stack:
try:
n.enter()
last_ctx += 1
except:
# Exception happened. Record exception info and store top-most handler
exc = sys.exc_info()
top = n.old_contexts[1]
# Execute callback if no exception happened while restoring state
if top is None:
try:
ret = fn(*args, **kwargs)
except:
exc = sys.exc_info()
top = contexts[1]
# If there was exception, try to handle it by going through the exception chain
if top is not None:
exc = _handle_exception(top, exc)
else:
# Otherwise take shorter path and run stack contexts in reverse order
while last_ctx > 0:
last_ctx -= 1
c = stack[last_ctx]
try:
c.exit(*exc)
except:
exc = sys.exc_info()
top = c.old_contexts[1]
break
else:
top = None
            # If an exception happened while unrolling, take the longer exception handler path
if top is not None:
exc = _handle_exception(top, exc)
# If exception was not handled, raise it
if exc != (None, None, None):
raise_exc_info(exc)
finally:
_state.contexts = current_state
return ret
wrapped._wrapped = True
return wrapped
def _handle_exception(tail, exc):
while tail is not None:
try:
if tail.exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
tail = tail.old_contexts[1]
return exc
def run_with_stack_context(context, func):
"""Run a coroutine ``func`` in the given `StackContext`.
It is not safe to have a ``yield`` statement within a ``with StackContext``
block, so it is difficult to use stack context with `.gen.coroutine`.
This helper function runs the function in the correct context while
keeping the ``yield`` and ``with`` statements syntactically separate.
Example::
@gen.coroutine
def incorrect():
with StackContext(ctx):
# ERROR: this will raise StackContextInconsistentError
yield other_coroutine()
@gen.coroutine
def correct():
yield run_with_stack_context(StackContext(ctx), other_coroutine)
.. versionadded:: 3.1
"""
with context:
return func()
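
# Illustrative sketch, not part of the original module: capturing an
# ExceptionStackContext with wrap(), so a callback invoked later (for
# example from a thread pool) is still covered by the handler.
if __name__ == "__main__":
    import logging

    def handle(exc_type, exc_value, exc_tb):
        logging.error("handled by stack context: %s", exc_value)
        return True  # returning True consumes the exception

    with ExceptionStackContext(handle):
        cb = wrap(lambda: 1 / 0)  # capture the active contexts

    cb()  # the ZeroDivisionError is routed to handle(), nothing escapes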

View file

@@ -0,0 +1,216 @@
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if not isinstance(b'', type('')):
def u(s):
return s
unicode_type = str
basestring_type = str
else:
def u(s):
return s.decode('unicode_escape')
# These names don't exist in py3, so use noqa comments to disable
# warnings in flake8.
unicode_type = unicode # noqa
basestring_type = basestring # noqa
def import_object(name):
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if isinstance(name, unicode_type) and str is not unicode_type:
# On python 2 a byte string is required.
name = name.encode('utf-8')
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes
if sys.version_info > (3,):
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
if isinstance(code, basestring):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
""")
def errno_from_exception(e):
"""Provides the errno from an Exception object.
    In some cases the errno attribute is not set, so we pull the
    errno out of the args; but if someone instantiates an Exception
    without any args, indexing into args fails. This function
    abstracts all that behavior to give you a safe way to get the
    errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None
__impl_kwargs = None
def __new__(cls, *args, **kwargs):
base = cls.configurable_base()
init_kwargs = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
init_kwargs.update(base.__impl_kwargs)
else:
impl = cls
init_kwargs.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(*args, **init_kwargs)
return instance
@classmethod
def configurable_base(cls):
"""Returns the base class of a configurable hierarchy.
        This will normally return the class in which it is defined
        (which is *not* necessarily the same as the ``cls`` classmethod parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
.. versionchanged:: 4.2
Now accepts positional arguments in addition to keyword arguments.
"""
@classmethod
def configure(cls, impl, **kwargs):
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (unicode_type, bytes)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
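
# Illustrative sketch, not part of the original module: the Configurable
# pattern described above. All class names here are hypothetical.
if __name__ == "__main__":
    class Compressor(Configurable):
        @classmethod
        def configurable_base(cls):
            return Compressor

        @classmethod
        def configurable_default(cls):
            return GzipCompressor  # implementation used when unconfigured

        def initialize(self, level=6):
            self.level = level

    class GzipCompressor(Compressor):
        pass

    class ZlibCompressor(Compressor):
        pass

    assert isinstance(Compressor(), GzipCompressor)  # default impl
    Compressor.configure(ZlibCompressor, level=9)    # swap globally
    c = Compressor()
    assert isinstance(c, ZlibCompressor) and c.level == 9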

View file

@@ -0,0 +1,553 @@
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A utility class to send to and recv from a non-blocking socket,
using tornado.
.. seealso::
- :mod:`zmq.asyncio`
- :mod:`zmq.eventloop.future`
"""
from __future__ import with_statement
import sys
import warnings
import zmq
from zmq.utils import jsonapi
try:
import cPickle as pickle
except ImportError:
import pickle
from .ioloop import IOLoop, gen_log
try:
from tornado.stack_context import wrap as stack_context_wrap
except ImportError:
if "zmq.eventloop.minitornado" in sys.modules:
from .minitornado.stack_context import wrap as stack_context_wrap
else:
# tornado 5 deprecates stack_context,
# tornado 6 removes it
def stack_context_wrap(callback):
return callback
try:
from queue import Queue
except ImportError:
from Queue import Queue
from zmq.utils.strtypes import basestring
try:
callable
except NameError:
callable = lambda obj: hasattr(obj, '__call__')
class ZMQStream(object):
"""A utility class to register callbacks when a zmq socket sends and receives
For use with zmq.eventloop.ioloop
There are three main methods
Methods:
* **on_recv(callback, copy=True):**
register a callback to be run every time the socket has something to receive
* **on_send(callback):**
register a callback to be run every time you call send
* **send(self, msg, flags=0, copy=False, callback=None):**
        perform a send that will trigger the callback;
        if callback is passed, on_send is also called.
There are also send_multipart(), send_json(), send_pyobj()
    Two other methods for deactivating the callbacks:
* **stop_on_recv():**
turn off the recv callback
* **stop_on_send():**
turn off the send callback
which simply call ``on_<evt>(None)``.
The entire socket interface, excluding direct recv methods, is also
provided, primarily through direct-linking the methods.
e.g.
>>> stream.bind is stream.socket.bind
True
"""
socket = None
io_loop = None
poller = None
_send_queue = None
_recv_callback = None
_send_callback = None
_close_callback = None
_state = 0
_flushed = False
_recv_copy = False
_fd = None
def __init__(self, socket, io_loop=None):
self.socket = socket
self.io_loop = io_loop or IOLoop.current()
self.poller = zmq.Poller()
self._fd = self.socket.FD
self._send_queue = Queue()
self._recv_callback = None
self._send_callback = None
self._close_callback = None
self._recv_copy = False
self._flushed = False
self._state = 0
self._init_io_state()
# shortcircuit some socket methods
self.bind = self.socket.bind
self.bind_to_random_port = self.socket.bind_to_random_port
self.connect = self.socket.connect
self.setsockopt = self.socket.setsockopt
self.getsockopt = self.socket.getsockopt
self.setsockopt_string = self.socket.setsockopt_string
self.getsockopt_string = self.socket.getsockopt_string
self.setsockopt_unicode = self.socket.setsockopt_unicode
self.getsockopt_unicode = self.socket.getsockopt_unicode
def stop_on_recv(self):
"""Disable callback and automatic receiving."""
return self.on_recv(None)
def stop_on_send(self):
"""Disable callback on sending."""
return self.on_send(None)
def stop_on_err(self):
"""DEPRECATED, does nothing"""
gen_log.warn("on_err does nothing, and will be removed")
def on_err(self, callback):
"""DEPRECATED, does nothing"""
gen_log.warn("on_err does nothing, and will be removed")
def on_recv(self, callback, copy=True):
"""Register a callback for when a message is ready to recv.
There can be only one callback registered at a time, so each
call to `on_recv` replaces previously registered callbacks.
on_recv(None) disables recv event polling.
Use on_recv_stream(callback) instead, to register a callback that will receive
both this ZMQStream and the message, instead of just the message.
Parameters
----------
callback : callable
callback must take exactly one argument, which will be a
list, as returned by socket.recv_multipart()
if callback is None, recv callbacks are disabled.
copy : bool
copy is passed directly to recv, so if copy is False,
callback will receive Message objects. If copy is True,
then callback will receive bytes/str objects.
Returns : None
"""
self._check_closed()
assert callback is None or callable(callback)
self._recv_callback = stack_context_wrap(callback)
self._recv_copy = copy
if callback is None:
self._drop_io_state(zmq.POLLIN)
else:
self._add_io_state(zmq.POLLIN)
def on_recv_stream(self, callback, copy=True):
"""Same as on_recv, but callback will get this stream as first argument
callback must take exactly two arguments, as it will be called as::
callback(stream, msg)
Useful when a single callback should be used with multiple streams.
"""
if callback is None:
self.stop_on_recv()
else:
self.on_recv(lambda msg: callback(self, msg), copy=copy)
def on_send(self, callback):
"""Register a callback to be called on each send
There will be two arguments::
callback(msg, status)
* `msg` will be the list of sendable objects that was just sent
* `status` will be the return result of socket.send_multipart(msg) -
MessageTracker or None.
Non-copying sends return a MessageTracker object whose
`done` attribute will be True when the send is complete.
This allows users to track when an object is safe to write to
again.
The second argument will always be None if copy=True
on the send.
Use on_send_stream(callback) to register a callback that will be passed
this ZMQStream as the first argument, in addition to the other two.
        on_send(None) disables the send callback.
Parameters
----------
callback : callable
callback must take exactly two arguments, which will be
the message being sent (always a list),
and the return result of socket.send_multipart(msg) -
MessageTracker or None.
if callback is None, send callbacks are disabled.
"""
self._check_closed()
assert callback is None or callable(callback)
self._send_callback = stack_context_wrap(callback)
def on_send_stream(self, callback):
"""Same as on_send, but callback will get this stream as first argument
Callback will be passed three arguments::
callback(stream, msg, status)
Useful when a single callback should be used with multiple streams.
"""
if callback is None:
self.stop_on_send()
else:
self.on_send(lambda msg, status: callback(self, msg, status))
def send(self, msg, flags=0, copy=True, track=False, callback=None, **kwargs):
"""Send a message, optionally also register a new callback for sends.
See zmq.socket.send for details.
"""
return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback, **kwargs)
def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None, **kwargs):
"""Send a multipart message, optionally also register a new callback for sends.
See zmq.socket.send_multipart for details.
"""
kwargs.update(dict(flags=flags, copy=copy, track=track))
self._send_queue.put((msg, kwargs))
callback = callback or self._send_callback
if callback is not None:
self.on_send(callback)
else:
# noop callback
self.on_send(lambda *args: None)
self._add_io_state(zmq.POLLOUT)
def send_string(self, u, flags=0, encoding='utf-8', callback=None, **kwargs):
"""Send a unicode message with an encoding.
See zmq.socket.send_unicode for details.
"""
if not isinstance(u, basestring):
raise TypeError("unicode/str objects only")
return self.send(u.encode(encoding), flags=flags, callback=callback, **kwargs)
send_unicode = send_string
def send_json(self, obj, flags=0, callback=None, **kwargs):
"""Send json-serialized version of an object.
See zmq.socket.send_json for details.
"""
if jsonapi is None:
raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
else:
msg = jsonapi.dumps(obj)
return self.send(msg, flags=flags, callback=callback, **kwargs)
def send_pyobj(self, obj, flags=0, protocol=-1, callback=None, **kwargs):
"""Send a Python object as a message using pickle to serialize.
        See zmq.socket.send_pyobj for details.
"""
msg = pickle.dumps(obj, protocol)
return self.send(msg, flags, callback=callback, **kwargs)
def _finish_flush(self):
"""callback for unsetting _flushed flag."""
self._flushed = False
def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
"""Flush pending messages.
This method safely handles all pending incoming and/or outgoing messages,
bypassing the inner loop, passing them to the registered callbacks.
A limit can be specified, to prevent blocking under high load.
flush will return the first time ANY of these conditions are met:
* No more events matching the flag are pending.
* the total number of events handled reaches the limit.
        Note that if ``flag & POLLIN != 0``, recv events will be flushed even if no callback
is registered, unlike normal IOLoop operation. This allows flush to be
used to remove *and ignore* incoming messages.
Parameters
----------
flag : int, default=POLLIN|POLLOUT
0MQ poll flags.
            If flag & POLLIN, recv events will be flushed.
            If flag & POLLOUT, send events will be flushed.
Both flags can be set at once, which is the default.
limit : None or int, optional
The maximum number of messages to send or receive.
Both send and recv count against this limit.
Returns
-------
int : count of events handled (both send and recv)
"""
self._check_closed()
# unset self._flushed, so callbacks will execute, in case flush has
# already been called this iteration
already_flushed = self._flushed
self._flushed = False
# initialize counters
count = 0
def update_flag():
"""Update the poll flag, to prevent registering POLLOUT events
if we don't have pending sends."""
return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
flag = update_flag()
if not flag:
# nothing to do
return 0
self.poller.register(self.socket, flag)
events = self.poller.poll(0)
while events and (not limit or count < limit):
s,event = events[0]
if event & zmq.POLLIN: # receiving
self._handle_recv()
count += 1
if self.socket is None:
# break if socket was closed during callback
break
if event & zmq.POLLOUT and self.sending():
self._handle_send()
count += 1
if self.socket is None:
# break if socket was closed during callback
break
flag = update_flag()
if flag:
self.poller.register(self.socket, flag)
events = self.poller.poll(0)
else:
events = []
if count: # only bypass loop if we actually flushed something
# skip send/recv callbacks this iteration
self._flushed = True
# reregister them at the end of the loop
if not already_flushed: # don't need to do it again
self.io_loop.add_callback(self._finish_flush)
elif already_flushed:
self._flushed = True
# update ioloop poll state, which may have changed
self._rebuild_io_state()
return count
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed."""
self._close_callback = stack_context_wrap(callback)
def close(self, linger=None):
"""Close this stream."""
if self.socket is not None:
if self.socket.closed:
# fallback on raw fd for closed sockets
# hopefully this happened promptly after close,
# otherwise somebody else may have the FD
warnings.warn(
"Unregistering FD %s after closing socket. "
"This could result in unregistering handlers for the wrong socket. "
"Please use stream.close() instead of closing the socket directly."
% self._fd,
stacklevel=2,
)
self.io_loop.remove_handler(self._fd)
else:
self.io_loop.remove_handler(self.socket)
self.socket.close(linger)
self.socket = None
if self._close_callback:
self._run_callback(self._close_callback)
def receiving(self):
"""Returns True if we are currently receiving from the stream."""
return self._recv_callback is not None
def sending(self):
"""Returns True if we are currently sending to the stream."""
return not self._send_queue.empty()
def closed(self):
if self.socket is None:
return True
if self.socket.closed:
# underlying socket has been closed, but not by us!
# trigger our cleanup
self.close()
return True
def _run_callback(self, callback, *args, **kwargs):
"""Wrap running callbacks in try/except to allow us to
close our socket."""
try:
# Use a NullContext to ensure that all StackContexts are run
# inside our blanket exception handler rather than outside.
callback(*args, **kwargs)
except:
gen_log.error("Uncaught exception in ZMQStream callback",
exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
def _handle_events(self, fd, events):
"""This method is the actual handler for IOLoop, that gets called whenever
an event on my socket is posted. It dispatches to _handle_recv, etc."""
if not self.socket:
gen_log.warning("Got events for closed stream %s", fd)
return
zmq_events = self.socket.EVENTS
try:
# dispatch events:
if zmq_events & zmq.POLLIN and self.receiving():
self._handle_recv()
if not self.socket:
return
if zmq_events & zmq.POLLOUT and self.sending():
self._handle_send()
if not self.socket:
return
# rebuild the poll state
self._rebuild_io_state()
except Exception:
gen_log.error("Uncaught exception in zmqstream callback",
exc_info=True)
raise
def _handle_recv(self):
"""Handle a recv event."""
if self._flushed:
return
try:
msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
# state changed since poll event
pass
else:
raise
else:
if self._recv_callback:
callback = self._recv_callback
self._run_callback(callback, msg)
def _handle_send(self):
"""Handle a send event."""
if self._flushed:
return
if not self.sending():
gen_log.error("Shouldn't have handled a send event")
return
msg, kwargs = self._send_queue.get()
try:
status = self.socket.send_multipart(msg, **kwargs)
except zmq.ZMQError as e:
gen_log.error("SEND Error: %s", e)
status = e
if self._send_callback:
callback = self._send_callback
self._run_callback(callback, msg, status)
def _check_closed(self):
if not self.socket:
raise IOError("Stream is closed")
def _rebuild_io_state(self):
"""rebuild io state based on self.sending() and receiving()"""
if self.socket is None:
return
state = 0
if self.receiving():
state |= zmq.POLLIN
if self.sending():
state |= zmq.POLLOUT
self._state = state
self._update_handler(state)
def _add_io_state(self, state):
"""Add io_state to poller."""
self._state = self._state | state
self._update_handler(self._state)
def _drop_io_state(self, state):
"""Stop poller from watching an io_state."""
self._state = self._state & (~state)
self._update_handler(self._state)
def _update_handler(self, state):
"""Update IOLoop handler with state."""
if self.socket is None:
return
if state & self.socket.events:
# events still exist that haven't been processed
# explicitly schedule handling to avoid missing events due to edge-triggered FDs
self.io_loop.add_callback(lambda : self._handle_events(self.socket, 0))
def _init_io_state(self):
"""initialize the ioloop event handler"""
self.io_loop.add_handler(self.socket, self._handle_events, self.io_loop.READ)
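
# Illustrative sketch, not part of the original module: a tiny echo
# server built on ZMQStream callbacks. The REP socket and address are
# placeholders.
if __name__ == "__main__":
    rep = zmq.Context.instance().socket(zmq.REP)
    rep.bind("tcp://127.0.0.1:5557")
    stream = ZMQStream(rep)  # registers with IOLoop.current()

    def echo(msg):
        # msg is the list of frames from recv_multipart()
        stream.send_multipart(msg)

    stream.on_recv(echo)
    IOLoop.current().start()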