Uploaded Test files

This commit is contained in:
Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions

View file

View file

@ -0,0 +1,11 @@
from __future__ import absolute_import
from .cloudpickle import *  # noqa
from .cloudpickle_fast import CloudPickler, dumps, dump  # noqa

# Conform to the convention used by python serialization libraries, which
# expose their Pickler subclass at top-level under the "Pickler" name.
Pickler = CloudPickler

__version__ = '1.6.0'

View file

@ -0,0 +1,842 @@
"""
This class is defined to override standard pickle functionality
The goals of it follow:
-Serialize lambdas and nested functions to compiled byte code
-Deal with main module correctly
-Deal with other non-serializable objects
It does not include an unpickler, as standard python unpickling suffices.
This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California, Berkeley nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from typing import Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
# Optional typing_extensions constructs: used to pickle parametrized Literal
# and Final hints on Python versions where they live outside the stdlib.
try:  # pragma: no branch
    import typing_extensions as _typing_extensions
    from typing_extensions import Literal, Final
except ImportError:
    _typing_extensions = Literal = Final = None

if sys.version_info >= (3, 5, 3):
    from typing import ClassVar
else:  # pragma: no cover
    # typing.ClassVar does not exist before Python 3.5.3.
    ClassVar = None

if sys.version_info >= (3, 8):
    from types import CellType
else:
    # Python < 3.8 does not expose the closure-cell type in ``types``:
    # recover it by introspecting the closure of a nested function.
    def f():
        a = 1

        def g():
            return a
        return g
    CellType = type(f().__closure__[0])
# cloudpickle is meant for inter process communication: we expect all
# communicating processes to run the same Python version hence we favor
# communication speed over compatibility:
DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL

# Track the provenance of reconstructed dynamic classes to make it possible to
# reconstruct instances from the matching singleton class definition when
# appropriate and preserve the usual "isinstance" semantics of Python objects.
_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()

PYPY = platform.python_implementation() == "PyPy"

builtin_code_type = None
if PYPY:
    # builtin-code objects only exist in pypy
    builtin_code_type = type(float.__new__.__code__)

# Cache of the global names referenced by already-inspected code objects,
# keyed weakly so code objects can still be garbage collected.
_extract_code_globals_cache = weakref.WeakKeyDictionary()
def _get_or_create_tracker_id(class_def):
    """Return the tracker id of *class_def*, allocating a fresh one if needed.

    Dynamic classes are tracked under a uuid so that unpickling can reuse an
    already-reconstructed class definition instead of building a duplicate.
    """
    with _DYNAMIC_CLASS_TRACKER_LOCK:
        try:
            tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def]
        except KeyError:
            # First time this class is seen: register it in both directions.
            tracker_id = uuid.uuid4().hex
            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = tracker_id
            _DYNAMIC_CLASS_TRACKER_BY_ID[tracker_id] = class_def
    return tracker_id
def _lookup_class_or_track(class_tracker_id, class_def):
    """Return the tracked class for *class_tracker_id*, registering
    *class_def* under that id when nothing is tracked yet.

    When *class_tracker_id* is None the class is not tracked at all and
    *class_def* is returned unchanged.
    """
    if class_tracker_id is None:
        return class_def
    with _DYNAMIC_CLASS_TRACKER_LOCK:
        tracked_class = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
            class_tracker_id, class_def)
        _DYNAMIC_CLASS_TRACKER_BY_CLASS[tracked_class] = class_tracker_id
    return tracked_class
def _whichmodule(obj, name):
    """Find the module an object belongs to.
    This function differs from ``pickle.whichmodule`` in two ways:
    - it does not mangle the cases where obj's module is __main__ and obj was
      not found in any module.
    - Errors arising during module introspection are ignored, as those errors
      are considered unwanted side effects.
    """
    if sys.version_info[:2] < (3, 7) and isinstance(obj, typing.TypeVar):  # pragma: no branch  # noqa
        # Workaround bug in old Python versions: prior to Python 3.7,
        # T.__module__ would always be set to "typing" even when the TypeVar T
        # would be defined in a different module.
        #
        # For such older Python versions, we ignore the __module__ attribute of
        # TypeVar instances and instead exhaustively lookup those instances in
        # all currently imported modules.
        module_name = None
    else:
        module_name = getattr(obj, '__module__', None)

    if module_name is not None:
        return module_name
    # Protect the iteration by using a copy of sys.modules against dynamic
    # modules that trigger imports of other modules upon calls to getattr or
    # other threads importing at the same time.
    for module_name, module in sys.modules.copy().items():
        # Some modules such as coverage can inject non-module objects inside
        # sys.modules
        if (
                module_name == '__main__' or
                module is None or
                not isinstance(module, types.ModuleType)
        ):
            continue
        try:
            # Identity check: make sure the attribute found under ``name`` in
            # this module is the very object we are looking for.
            if _getattribute(module, name)[0] is obj:
                return module_name
        except Exception:
            # Ignore any side effect raised by exotic module introspection.
            pass
    return None
def _is_importable(obj, name=None):
    """Dispatcher utility to test the importability of various constructs."""
    if isinstance(obj, types.FunctionType):
        return _lookup_module_and_qualname(obj, name=name) is not None
    elif issubclass(type(obj), type):
        return _lookup_module_and_qualname(obj, name=name) is not None
    elif isinstance(obj, types.ModuleType):
        # We assume that sys.modules is primarily used as a cache mechanism
        # for the Python import machinery. Checking if a module has been
        # added in sys.modules is therefore a cheap and simple heuristic to
        # tell us whether we can assume that a given module could be imported
        # by name in another Python process.
        return obj.__name__ in sys.modules
    else:
        raise TypeError(
            "cannot check importability of {} instances".format(
                type(obj).__name__)
        )
def _lookup_module_and_qualname(obj, name=None):
    """Return a ``(module, qualname)`` pair allowing *obj* to be pickled by
    reference, or ``None`` when *obj* must be pickled by value.
    """
    if name is None:
        name = getattr(obj, '__qualname__', None)
    if name is None:  # pragma: no cover
        # This used to be needed for Python 2.7 support but is probably not
        # needed anymore. However we keep the __name__ introspection in case
        # users of cloudpickle rely on this old behavior for unknown reasons.
        name = getattr(obj, '__name__', None)

    module_name = _whichmodule(obj, name)

    if module_name is None:
        # In this case, obj.__module__ is None AND obj was not found in any
        # imported module. obj is thus treated as dynamic.
        return None

    if module_name == "__main__":
        return None

    # Note: if module_name is in sys.modules, the corresponding module is
    # assumed importable at unpickling time. See #357
    module = sys.modules.get(module_name, None)
    if module is None:
        # The main reason why obj's module would not be imported is that this
        # module has been dynamically created, using for example
        # types.ModuleType. The other possibility is that module was removed
        # from sys.modules after obj was created/imported. But this case is
        # not supported, as the standard pickle does not support it either.
        return None

    try:
        obj2, parent = _getattribute(module, name)
    except AttributeError:
        # obj was not found inside the module it points to
        return None
    if obj2 is not obj:
        # A different object lives under this name: obj is dynamic.
        return None
    return module, name
def _extract_code_globals(co):
    """
    Find all globals names read or written to by codeblock co
    """
    out_names = _extract_code_globals_cache.get(co)
    if out_names is None:
        names = co.co_names
        out_names = {names[oparg] for _, oparg in _walk_global_ops(co)}

        # Declaring a function inside another one using the "def ..."
        # syntax generates a constant code object corresponding to the one
        # of the nested function. As the nested function may itself need
        # global variables, we need to introspect its code, extract its
        # globals, (look for code objects in its co_consts attribute..) and
        # add the result to code_globals
        if co.co_consts:
            for const in co.co_consts:
                if isinstance(const, types.CodeType):
                    out_names |= _extract_code_globals(const)

        # Memoize per code object: the same code object is inspected once.
        _extract_code_globals_cache[co] = out_names

    return out_names
def _find_imported_submodules(code, top_level_dependencies):
    """
    Find currently imported submodules used by a function.
    Submodules used by a function need to be detected and referenced for the
    function to work correctly at depickling time. Because submodules can be
    referenced as attribute of their parent package (``package.submodule``),
    we need a special introspection technique that does not rely on
    GLOBAL-related opcodes to find references of them in a code object.
    Example:
    ```
    import concurrent.futures
    import cloudpickle
    def func():
        x = concurrent.futures.ThreadPoolExecutor
    if __name__ == '__main__':
        cloudpickle.dumps(func)
    ```
    The globals extracted by cloudpickle in the function's state include the
    concurrent package, but not its submodule (here, concurrent.futures),
    which is the module used by func. ``_find_imported_submodules`` will
    detect the usage of concurrent.futures. Saving this module alongside with
    func will ensure that calling func once depickled does not fail due to
    concurrent.futures not being imported
    """
    subimports = []
    # check if any known dependency is an imported package
    for x in top_level_dependencies:
        if (isinstance(x, types.ModuleType) and
                hasattr(x, '__package__') and x.__package__):
            # check if the package has any currently loaded sub-imports
            prefix = x.__name__ + '.'
            # A concurrent thread could mutate sys.modules,
            # make sure we iterate over a copy to avoid exceptions
            for name in list(sys.modules):
                # Older versions of pytest will add a "None" module to
                # sys.modules.
                if name is not None and name.startswith(prefix):
                    # check whether the function can address the sub-module:
                    # every dotted component must appear in co_names.
                    tokens = set(name[len(prefix):].split('.'))
                    if not tokens - set(code.co_names):
                        subimports.append(sys.modules[name])
    return subimports
def cell_set(cell, value):
    """Set the value of a closure cell.
    The point of this function is to set the cell_contents attribute of a cell
    after its creation. This operation is necessary in case the cell contains
    a reference to the function the cell belongs to, as when calling the
    function's constructor
    ``f = types.FunctionType(code, globals, name, argdefs, closure)``,
    closure will not be able to contain the yet-to-be-created f.
    In Python3.7, cell_contents is writeable, so setting the contents of a
    cell can be done simply using
    >>> cell.cell_contents = value
    In earlier Python3 versions, the cell_contents attribute of a cell is read
    only, but this limitation can be worked around by leveraging the Python 3
    ``nonlocal`` keyword.
    In Python2 however, this attribute is read only, and there is no
    ``nonlocal`` keyword. For this reason, we need to come up with more
    complicated hacks to set this attribute.
    The chosen approach is to create a function with a STORE_DEREF opcode,
    which sets the content of a closure variable. Typically:
    >>> def inner(value):
    ...     lambda: cell  # the lambda makes cell a closure
    ...     cell = value  # cell is a closure, so this triggers a STORE_DEREF
    (Note that in Python2, A STORE_DEREF can never be triggered from an inner
    function. The function g for example here
    >>> def f(var):
    ...     def g():
    ...         var += 1
    ...     return g
    will not modify the closure variable ``var`` inplace, but instead try to
    load a local variable var and increment it. As g does not assign the local
    variable ``var`` any initial value, calling f(1)() will fail at runtime.)
    Our objective is to set the value of a given cell ``cell``. So we need to
    somewhat reference our ``cell`` object into the ``inner`` function so that
    this object (and not the smoke cell of the lambda function) gets affected
    by the STORE_DEREF operation.
    In inner, ``cell`` is referenced as a cell variable (an enclosing variable
    that is referenced by the inner function). If we create a new function
    cell_set with the exact same code as ``inner``, but with ``cell`` marked
    as a free variable instead, the STORE_DEREF will be applied on its closure
    - ``cell``, which we can specify explicitly during construction! The new
    cell_set variable thus actually sets the contents of a specified cell!
    Note: we do not make use of the ``nonlocal`` keyword to set the contents
    of a cell in early python3 versions to limit possible syntax errors in
    case test and checker libraries decide to parse the whole file.
    """
    if sys.version_info[:2] >= (3, 7):  # pragma: no branch
        cell.cell_contents = value
    else:
        # Build a one-shot setter whose closure is the target cell itself
        # (see the docstring above for why this works).
        _cell_set = types.FunctionType(
            _cell_set_template_code, {}, '_cell_set', (), (cell,),)
        _cell_set(value)
def _make_cell_set_template_code():
    """Build the template code object used by ``cell_set`` on Python < 3.7.

    The factory's ``cell`` cell variable is re-declared as a free variable so
    that a function created from this code object stores into the closure
    cell supplied at construction time (see the ``cell_set`` docstring).
    """
    def _cell_set_factory(value):
        lambda: cell
        cell = value

    co = _cell_set_factory.__code__

    _cell_set_template_code = types.CodeType(
        co.co_argcount,
        co.co_kwonlyargcount,  # Python 3 only argument
        co.co_nlocals,
        co.co_stacksize,
        co.co_flags,
        co.co_code,
        co.co_consts,
        co.co_names,
        co.co_varnames,
        co.co_filename,
        co.co_name,
        co.co_firstlineno,
        co.co_lnotab,
        co.co_cellvars,  # co_freevars is initialized with co_cellvars
        (),  # co_cellvars is made empty
    )
    return _cell_set_template_code


if sys.version_info[:2] < (3, 7):
    # Only needed where cell_contents is not directly writeable.
    _cell_set_template_code = _make_cell_set_template_code()
# relevant opcodes: the bytecode instructions that read or write a global
# name (used by _walk_global_ops / _extract_code_globals).
STORE_GLOBAL = opcode.opmap['STORE_GLOBAL']
DELETE_GLOBAL = opcode.opmap['DELETE_GLOBAL']
LOAD_GLOBAL = opcode.opmap['LOAD_GLOBAL']
GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG

# Map each builtin type object to its attribute name in the ``types`` module
# so it can be pickled by name (see _builtin_type).
_BUILTIN_TYPE_NAMES = {}
for k, v in types.__dict__.items():
    if type(v) is type:
        _BUILTIN_TYPE_NAMES[v] = k
def _builtin_type(name):
if name == "ClassType": # pragma: no cover
# Backward compat to load pickle files generated with cloudpickle
# < 1.3 even if loading pickle files from older versions is not
# officially supported.
return type
return getattr(types, name)
def _walk_global_ops(code):
    """
    Yield (opcode, argument number) tuples for all
    global-referencing instructions in *code*.
    """
    for instruction in dis.get_instructions(code):
        current_op = instruction.opcode
        if current_op not in GLOBAL_OPS:
            continue
        yield current_op, instruction.arg
def _extract_class_dict(cls):
"""Retrieve a copy of the dict of a class without the inherited methods"""
clsdict = dict(cls.__dict__) # copy dict proxy to a dict
if len(cls.__bases__) == 1:
inherited_dict = cls.__bases__[0].__dict__
else:
inherited_dict = {}
for base in reversed(cls.__bases__):
inherited_dict.update(base.__dict__)
to_remove = []
for name, value in clsdict.items():
try:
base_value = inherited_dict[name]
if value is base_value:
to_remove.append(name)
except KeyError:
pass
for name in to_remove:
clsdict.pop(name)
return clsdict
if sys.version_info[:2] < (3, 7):  # pragma: no branch
    def _is_parametrized_type_hint(obj):
        # Heuristically detect a parametrized type hint (e.g. List[int]).
        # This is very cheap but might generate false positives.
        # general typing Constructs
        is_typing = getattr(obj, '__origin__', None) is not None

        # typing_extensions.Literal
        is_litteral = getattr(obj, '__values__', None) is not None

        # typing_extensions.Final
        is_final = getattr(obj, '__type__', None) is not None

        # typing.Union/Tuple for old Python 3.5
        is_union = getattr(obj, '__union_params__', None) is not None
        is_tuple = getattr(obj, '__tuple_params__', None) is not None
        is_callable = (
            getattr(obj, '__result__', None) is not None and
            getattr(obj, '__args__', None) is not None
        )
        return any((is_typing, is_litteral, is_final, is_union, is_tuple,
                    is_callable))

    def _create_parametrized_type_hint(origin, args):
        # Re-parametrize *origin* (e.g. List) with *args* (e.g. int).
        return origin[args]
else:
    # On Python 3.7+ parametrized type hints are pickled natively by the
    # standard pickle machinery, so these helpers are unused.
    _is_parametrized_type_hint = None
    _create_parametrized_type_hint = None
def parametrized_type_hint_getinitargs(obj):
    """Return the ``(origin, args)`` pair needed to rebuild the parametrized
    type hint *obj* via ``_create_parametrized_type_hint`` (Python < 3.7).
    """
    # The distorted type check semantic for typing construct becomes:
    # ``type(obj) is type(TypeHint)``, which means "obj is a
    # parametrized TypeHint"
    if type(obj) is type(Literal):  # pragma: no branch
        initargs = (Literal, obj.__values__)
    elif type(obj) is type(Final):  # pragma: no branch
        initargs = (Final, obj.__type__)
    elif type(obj) is type(ClassVar):
        initargs = (ClassVar, obj.__type__)
    elif type(obj) is type(Generic):
        parameters = obj.__parameters__
        if len(obj.__parameters__) > 0:
            # in early Python 3.5, __parameters__ was sometimes
            # preferred to __args__
            initargs = (obj.__origin__, parameters)
        else:
            initargs = (obj.__origin__, obj.__args__)
    elif type(obj) is type(Union):
        if sys.version_info < (3, 5, 3):  # pragma: no cover
            initargs = (Union, obj.__union_params__)
        else:
            initargs = (Union, obj.__args__)
    elif type(obj) is type(Tuple):
        if sys.version_info < (3, 5, 3):  # pragma: no cover
            initargs = (Tuple, obj.__tuple_params__)
        else:
            initargs = (Tuple, obj.__args__)
    elif type(obj) is type(Callable):
        if sys.version_info < (3, 5, 3):  # pragma: no cover
            args = obj.__args__
            result = obj.__result__
            if args != Ellipsis:
                if isinstance(args, tuple):
                    args = list(args)
                else:
                    args = [args]
        else:
            # On 3.5.3+ the return type is the last element of __args__.
            (*args, result) = obj.__args__
            if len(args) == 1 and args[0] is Ellipsis:
                args = Ellipsis
            else:
                args = list(args)
        initargs = (Callable, (args, result))
    else:  # pragma: no cover
        raise pickle.PicklingError(
            "Cloudpickle Error: Unknown type {}".format(type(obj))
        )
    return initargs
# Tornado support
def is_tornado_coroutine(func):
    """
    Return whether *func* is a Tornado coroutine function.
    Running coroutines are not supported.
    """
    gen_module = sys.modules.get('tornado.gen')
    if gen_module is None:
        # tornado.gen was never imported: func cannot be a tornado coroutine.
        return False
    checker = getattr(gen_module, "is_coroutine_function", None)
    if checker is None:
        # Tornado version is too old
        return False
    return checker(func)
def _rebuild_tornado_coroutine(func):
    # Re-wrap *func* with tornado.gen.coroutine at unpickling time; tornado
    # is imported lazily so this module does not depend on it.
    from tornado import gen
    return gen.coroutine(func)
# Expose the standard pickle loading functions in this namespace: cloudpickle
# does not need a custom Unpickler, plain pickle.load/loads suffice.
load = pickle.load
loads = pickle.loads
# hack for __import__ not working as desired
def subimport(name):
    """Import module *name* and return the module object itself.

    ``__import__`` returns the top-level package for dotted names, so the
    actually-requested (sub)module is fetched from ``sys.modules`` instead.
    """
    __import__(name)
    imported_module = sys.modules[name]
    return imported_module
def dynamic_subimport(name, vars):
    """Recreate a dynamic (non-importable) module named *name*, populating
    its namespace from the *vars* mapping.
    """
    module = types.ModuleType(name)
    namespace = module.__dict__
    namespace.update(vars)
    # Restore __builtins__ so code executing in this module can reach the
    # builtin functions.
    namespace['__builtins__'] = builtins.__dict__
    return module
def _gen_ellipsis():
    # Reconstructor for the Ellipsis singleton at unpickling time.
    return Ellipsis


def _gen_not_implemented():
    # Reconstructor for the NotImplemented singleton at unpickling time.
    return NotImplemented
def _get_cell_contents(cell):
    # Return the value stored in *cell*, or the _empty_cell_value sentinel
    # when the cell has not been filled yet (accessing cell_contents on an
    # empty cell raises ValueError).
    try:
        return cell.cell_contents
    except ValueError:
        # sentinel used by ``_fill_function`` which will leave the cell empty
        return _empty_cell_value
def instance(cls):
    """Create a new instance of a class.
    Parameters
    ----------
    cls : type
        The class to create an instance of.
    Returns
    -------
    instance : cls
        A new instance of ``cls``.
    """
    # Used as a decorator to replace a class by its (no-argument) instance.
    new_object = cls()
    return new_object
@instance
class _empty_cell_value(object):
    """sentinel for empty closures
    """
    @classmethod
    def __reduce__(cls):
        # Pickle as the bare class name so unpickling resolves back to this
        # module-level singleton instance.
        return cls.__name__
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is create by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
if 'kwdefaults' in state:
func.__kwdefaults__ = state['kwdefaults']
# _cloudpickle_subimports is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
if '_cloudpickle_submodules' in state:
state.pop('_cloudpickle_submodules')
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func
def _make_empty_cell():
    # Build a closure cell whose content is left unset: referencing ``cell``
    # in the lambda makes it a closure variable, while the dead branch keeps
    # the compiler from ever assigning it a value.
    if False:
        # trick the compiler into creating an empty cell in our lambda
        cell = None
        raise AssertionError('this route should not be executed')

    return (lambda: cell).__closure__[0]
def _make_cell(value=_empty_cell_value):
    # Reconstruct a closure cell holding *value*; when the empty-cell
    # sentinel is passed the cell is left empty on purpose.
    cell = _make_empty_cell()
    if value is not _empty_cell_value:
        cell_set(cell, value)
    return cell
def _make_skel_func(code, cell_count, base_globals=None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
# This function is deprecated and should be removed in cloudpickle 1.7
warnings.warn(
"A pickle file created using an old (<=1.4.1) version of cloudpicke "
"is currently being loaded. This is not supported by cloudpickle and "
"will break in cloudpickle 1.7", category=UserWarning
)
# This is backward-compatibility code: for cloudpickle versions between
# 0.5.4 and 0.7, base_globals could be a string or None. base_globals
# should now always be a dictionary.
if base_globals is None or isinstance(base_globals, str):
base_globals = {}
base_globals['__builtins__'] = __builtins__
closure = (
tuple(_make_empty_cell() for _ in range(cell_count))
if cell_count >= 0 else
None
)
return types.FunctionType(code, base_globals, None, None, closure)
def _make_skeleton_class(type_constructor, name, bases, type_kwargs,
                         class_tracker_id, extra):
    """Build dynamic class with an empty __dict__ to be filled once memoized
    If class_tracker_id is not None, try to lookup an existing class
    definition matching that id. If none is found, track a newly reconstructed
    class definition under that id so that other instances stemming from the
    same class id will also reuse this class definition.
    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    # types.new_class invokes the proper metaclass machinery, unlike a
    # direct call to type().
    skeleton_class = types.new_class(
        name, bases, {'metaclass': type_constructor},
        lambda ns: ns.update(type_kwargs)
    )
    return _lookup_class_or_track(class_tracker_id, skeleton_class)
def _rehydrate_skeleton_class(skeleton_class, class_dict):
"""Put attributes from `class_dict` back on `skeleton_class`.
See CloudPickler.save_dynamic_class for more info.
"""
registry = None
for attrname, attr in class_dict.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(skeleton_class, attrname, attr)
if registry is not None:
for subclass in registry:
skeleton_class.register(subclass)
return skeleton_class
def _make_skeleton_enum(bases, name, qualname, members, module,
                        class_tracker_id, extra):
    """Build dynamic enum with an empty __dict__ to be filled once memoized
    The creation of the enum class is inspired by the code of
    EnumMeta._create_.
    If class_tracker_id is not None, try to lookup an existing enum definition
    matching that id. If none is found, track a newly reconstructed enum
    definition under that id so that other instances stemming from the same
    class id will also reuse this enum definition.
    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    # enums always inherit from their base Enum class at the last position in
    # the list of base classes:
    enum_base = bases[-1]
    metacls = enum_base.__class__
    # __prepare__ returns the special _EnumDict namespace that records member
    # definitions in order.
    classdict = metacls.__prepare__(name, bases)

    for member_name, member_value in members.items():
        classdict[member_name] = member_value
    enum_class = metacls.__new__(metacls, name, bases, classdict)
    enum_class.__module__ = module
    enum_class.__qualname__ = qualname

    return _lookup_class_or_track(class_tracker_id, enum_class)
def _make_typevar(name, bound, constraints, covariant, contravariant,
                  class_tracker_id):
    # Reconstruct a typing.TypeVar from the decomposed state produced by
    # _decompose_typevar, reusing an already-tracked instance when possible.
    tv = typing.TypeVar(
        name, *constraints, bound=bound,
        covariant=covariant, contravariant=contravariant
    )
    if class_tracker_id is not None:
        return _lookup_class_or_track(class_tracker_id, tv)
    else:  # pragma: nocover
        # Only for Python 3.5.3 compat.
        return tv
def _decompose_typevar(obj):
    # Break a typing.TypeVar down into the picklable arguments accepted by
    # _make_typevar.
    try:
        class_tracker_id = _get_or_create_tracker_id(obj)
    except TypeError:  # pragma: nocover
        # TypeVar instances are not weakref-able in Python 3.5.3
        class_tracker_id = None
    return (
        obj.__name__, obj.__bound__, obj.__constraints__,
        obj.__covariant__, obj.__contravariant__,
        class_tracker_id,
    )
def _typevar_reduce(obj):
    # TypeVar instances have no __qualname__ hence we pass the name
    # explicitly. Importable TypeVars are pickled by reference (getattr on
    # their module); dynamic ones are pickled by value via _make_typevar.
    module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
    if module_and_name is None:
        return (_make_typevar, _decompose_typevar(obj))
    return (getattr, module_and_name)
def _get_bases(typ):
if hasattr(typ, '__orig_bases__'):
# For generic types (see PEP 560)
bases_attr = '__orig_bases__'
else:
# For regular class objects
bases_attr = '__bases__'
return getattr(typ, bases_attr)
def _make_dict_keys(obj):
return dict.fromkeys(obj).keys()
def _make_dict_values(obj):
return {i: _ for i, _ in enumerate(obj)}.values()
def _make_dict_items(obj):
    # Rebuild a dict_items view; *obj* is the reconstructed backing dict, so
    # returning its live view preserves the original semantics.
    return obj.items()

View file

@ -0,0 +1,770 @@
"""
New, fast version of the CloudPickler.
This new CloudPickler class can now extend the fast C Pickler instead of the
previous Python implementation of the Pickler class. Because this functionality
is only available for Python versions 3.8+, a lot of backward-compatibility
code is also removed.
Note that the C Pickler sublassing API is CPython-specific. Therefore, some
guards present in cloudpickle.py that were written to handle PyPy specificities
are not present in cloudpickle_fast.py
"""
import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _is_importable,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items,
)
# Protocol 5 (out-of-band buffers) is only usable with CPython's C Pickler,
# hence the two variants of dump/dumps below.
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
    # Shorthands similar to pickle.dump/pickle.dumps
    def dump(obj, file, protocol=None, buffer_callback=None):
        """Serialize obj as bytes streamed into file
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias
        to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        CloudPickler(
            file, protocol=protocol, buffer_callback=buffer_callback
        ).dump(obj)

    def dumps(obj, protocol=None, buffer_callback=None):
        """Serialize obj as a string of bytes allocated in memory
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias
        to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        with io.BytesIO() as file:
            cp = CloudPickler(
                file, protocol=protocol, buffer_callback=buffer_callback
            )
            cp.dump(obj)
            return file.getvalue()
else:
    # Shorthands similar to pickle.dump/pickle.dumps
    def dump(obj, file, protocol=None):
        """Serialize obj as bytes streamed into file
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias
        to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        CloudPickler(file, protocol=protocol).dump(obj)

    def dumps(obj, protocol=None):
        """Serialize obj as a string of bytes allocated in memory
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias
        to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        with io.BytesIO() as file:
            cp = CloudPickler(file, protocol=protocol)
            cp.dump(obj)
            return file.getvalue()


# Loading is delegated to the plain pickle module: no custom Unpickler needed.
load, loads = pickle.load, pickle.loads
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------

def _class_getnewargs(obj):
    # Return the arguments needed to rebuild the skeleton of the dynamic
    # class *obj* (consumed by _make_skeleton_class at unpickling time).
    type_kwargs = {}
    if "__slots__" in obj.__dict__:
        type_kwargs["__slots__"] = obj.__slots__

    __dict__ = obj.__dict__.get('__dict__', None)
    if isinstance(__dict__, property):
        # Preserve a property-backed __dict__ descriptor.
        type_kwargs['__dict__'] = __dict__

    return (type(obj), obj.__name__, _get_bases(obj), type_kwargs,
            _get_or_create_tracker_id(obj), None)
def _enum_getnewargs(obj):
    """Return the arguments needed to rebuild the skeleton of the dynamic
    Enum subclass *obj* (consumed by ``_make_skeleton_enum``).
    """
    # Dict comprehension instead of dict(generator): same result, clearer
    # and faster (flake8-comprehensions C402).
    members = {e.name: e.value for e in obj}
    return (obj.__bases__, obj.__name__, obj.__qualname__, members,
            obj.__module__, _get_or_create_tracker_id(obj), None)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
    # Identity reconstructor: the file-like object is rebuilt entirely by
    # the reducer's arguments, so it is returned unchanged.
    return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
    # - Put func's dynamic attributes (stored in func.__dict__) in state.
    #   These attributes will be restored at unpickling time using
    #   f.__dict__.update(state)
    # - Put func's members into slotstate. Such attributes will be restored
    #   at unpickling time by iterating over slotstate and calling
    #   setattr(func, slotname, slotvalue)
    slotstate = {
        "__name__": func.__name__,
        "__qualname__": func.__qualname__,
        "__annotations__": func.__annotations__,
        "__kwdefaults__": func.__kwdefaults__,
        "__defaults__": func.__defaults__,
        "__module__": func.__module__,
        "__doc__": func.__doc__,
        "__closure__": func.__closure__,
    }

    f_globals_ref = _extract_code_globals(func.__code__)
    # Only serialize the globals actually referenced by func's code object.
    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in
                 func.__globals__}

    closure_values = (
        list(map(_get_cell_contents, func.__closure__))
        if func.__closure__ is not None else ()
    )

    # Extract currently-imported submodules used by func. Storing these
    # modules in a smoke _cloudpickle_subimports attribute of the object's
    # state will trigger the side effect of importing these modules at
    # unpickling time (which is necessary for func to work correctly once
    # depickled)
    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
        func.__code__, itertools.chain(f_globals.values(), closure_values))
    slotstate["__globals__"] = f_globals

    state = func.__dict__
    return state, slotstate
def _class_getstate(obj):
    """Return the (state, slotstate) pair used to pickle class *obj*."""
    clsdict = _extract_class_dict(obj)
    # Weakref slots cannot (and need not) be pickled.
    clsdict.pop('__weakref__', None)

    if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, don't pickle the
        # cache/negative caches populated during isinstance/issubclass
        # checks, but pickle the list of registered subclasses of obj.
        clsdict.pop('_abc_cache', None)
        clsdict.pop('_abc_negative_cache', None)
        clsdict.pop('_abc_negative_cache_version', None)
        registry = clsdict.pop('_abc_registry', None)
        if registry is None:
            # in Python3.7+, the abc caches and registered subclasses of a
            # class are bundled into the single _abc_impl attribute
            clsdict.pop('_abc_impl', None)
            (registry, _, _, _) = abc._get_dump(obj)

            # _get_dump returns weakrefs: dereference them so the state
            # carries the actual registered subclasses.
            clsdict["_abc_impl"] = [subclass_weakref()
                                    for subclass_weakref in registry]
        else:
            # In the above if clause, registry is a set of weakrefs -- in
            # this case, registry is a WeakSet
            clsdict["_abc_impl"] = [type_ for type_ in registry]

    if "__slots__" in clsdict:
        # pickle string length optimization: member descriptors of obj are
        # created automatically from obj's __slots__ attribute, no need to
        # save them in obj's state
        if isinstance(obj.__slots__, str):
            clsdict.pop(obj.__slots__)
        else:
            for k in obj.__slots__:
                clsdict.pop(k, None)

    clsdict.pop('__dict__', None)  # unpicklable property object

    return (clsdict, {})
def _enum_getstate(obj):
    """Like _class_getstate, but strips Enum machinery handled elsewhere."""
    clsdict, slotstate = _class_getstate(obj)

    member_names = [entry.name for entry in obj]
    # Cleanup the clsdict that will be passed to _rehydrate_skeleton_class:
    # these attributes are already handled by the metaclass.
    machinery_attrs = (
        "_generate_next_value_",
        "_member_names_",
        "_member_map_",
        "_member_type_",
        "_value2member_map_",
    )
    for attrname in machinery_attrs:
        clsdict.pop(attrname, None)
    for member_name in member_names:
        clsdict.pop(member_name)
    # Special handling of Enum subclasses
    return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exception objects, instances of the "object"
# class, etc.) are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some are not. The following methods were created to "fill
# these holes".
def _code_reduce(obj):
"""codeobject reducer"""
if hasattr(obj, "co_posonlyargcount"): # pragma: no branch
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
return types.CodeType, args
def _cell_reduce(obj):
    """Cell (containing values of a function's free variables) reducer."""
    empty = False
    try:
        contents = obj.cell_contents
    except ValueError:
        # Accessing cell_contents on an empty cell raises ValueError.
        empty = True
    if empty:
        return _make_empty_cell, ()
    return _make_cell, (contents, )
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
    """Reducer for open file objects: snapshot contents into a StringIO.

    Registered for io.TextIOWrapper, so only readable text-mode files can
    be pickled; std streams are re-fetched by name, everything else is
    rejected with PicklingError.
    """
    import io

    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
        raise pickle.PicklingError(
            "Cannot pickle files that do not map to an actual file"
        )
    # std streams are rebuilt by attribute lookup on the target interpreter
    # rather than by copying their contents.
    if obj is sys.stdout:
        return getattr, (sys, "stdout")
    if obj is sys.stderr:
        return getattr, (sys, "stderr")
    if obj is sys.stdin:
        raise pickle.PicklingError("Cannot pickle standard input")
    if obj.closed:
        raise pickle.PicklingError("Cannot pickle closed files")
    if hasattr(obj, "isatty") and obj.isatty():
        raise pickle.PicklingError(
            "Cannot pickle files that map to tty objects"
        )
    if "r" not in obj.mode and "+" not in obj.mode:
        raise pickle.PicklingError(
            "Cannot pickle files that are not opened for reading: %s"
            % obj.mode
        )

    name = obj.name

    retval = io.StringIO()

    try:
        # Read the whole file
        curloc = obj.tell()
        obj.seek(0)
        contents = obj.read()
        # Restore the original cursor position on the source file.
        obj.seek(curloc)
    except IOError as e:
        raise pickle.PicklingError(
            "Cannot pickle file %s as it cannot be read" % name
        ) from e
    retval.write(contents)
    # The surrogate's cursor mirrors the original file's position.
    retval.seek(curloc)

    retval.name = name
    return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
    """Reducer for module objects, importable or dynamic."""
    if not _is_importable(obj):
        # Dynamic modules are rebuilt from their name and namespace; drop
        # __builtins__ since it is re-populated at unpickling time.
        obj.__dict__.pop('__builtins__', None)
        return dynamic_subimport, (obj.__name__, vars(obj))
    return subimport, (obj.__name__,)
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
    """
    Save a class that can't be stored as module global.

    This method is used to serialize classes that are defined inside
    functions, or that otherwise can't be serialized as attribute lookups
    from global modules.
    """
    is_enum_subclass = Enum is not None and issubclass(obj, Enum)
    if is_enum_subclass:
        skeleton_factory = _make_skeleton_enum
        newargs = _enum_getnewargs(obj)
        state = _enum_getstate(obj)
    else:
        skeleton_factory = _make_skeleton_class
        newargs = _class_getnewargs(obj)
        state = _class_getstate(obj)
    return (skeleton_factory, newargs, state, None, None, _class_setstate)
def _class_reduce(obj):
    """Select the reducer depending on the dynamic nature of the class obj"""
    # The types of these singletons are rebuilt by calling type() on the
    # singleton itself at unpickling time.
    for singleton in (None, Ellipsis, NotImplemented):
        if obj is type(singleton):
            return type, (singleton,)
    if obj in _BUILTIN_TYPE_NAMES:
        return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
    if not _is_importable(obj):
        return _dynamic_class_reduce(obj)
    # Importable classes fall back to the default attribute-lookup saver.
    return NotImplemented
def _dict_keys_reduce(obj):
    # Only ship the keys themselves: serializing the backing dict could
    # unintentionally leak sensitive values.
    return _make_dict_keys, ([key for key in obj], )
def _dict_values_reduce(obj):
    # Only ship the values themselves: serializing the backing dict could
    # unintentionally leak the associated keys.
    return _make_dict_values, ([value for value in obj], )
def _dict_items_reduce(obj):
    # Rebuild the items view from a fresh dict holding the same pairs.
    return _make_dict_items, ({key: value for key, value in obj}, )
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at unpickling time.
def _function_setstate(obj, state):
    """Update the state of a dynamic function.

    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that
    calls setattr on items of the slotstate. Instead, we have to modify them
    inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)

    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # The submodules entry only existed to force those modules to be
    # re-imported while the state was depickled; it is not an attribute.
    slotstate.pop("_cloudpickle_submodules")

    obj.__globals__.update(obj_globals)
    obj.__globals__["__builtins__"] = __builtins__

    if obj_closure is not None:
        for cell_index, pickled_cell in enumerate(obj_closure):
            try:
                cell_value = pickled_cell.cell_contents
            except ValueError:
                # Empty cells stay empty.
                continue
            cell_set(obj.__closure__[cell_index], cell_value)

    for attr_name, attr_value in slotstate.items():
        setattr(obj, attr_name, attr_value)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(obj, attrname, attr)
if registry is not None:
for subclass in registry:
obj.register(subclass)
return obj
class CloudPickler(Pickler):
    """Pickler subclass wired with cloudpickle's custom reducers.

    The class body is assembled conditionally: with pickle protocol 5
    available, reducers for functions/classes are installed through the
    ``reducer_override`` callback of the C Pickler; otherwise the
    pure-Python ``Pickler.dispatch`` mechanism is patched instead.
    """

    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce
    _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
    _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
    _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce

    # cloudpickle's own reducers take precedence over copyreg-registered ones.
    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of CloudPickler
    # objects, as they rely on a CloudPickler attribute (globals_ref)
    def _dynamic_function_reduce(self, func):
        """Reduce a function that is not pickleable via attribute lookup."""
        newargs = self._function_getnewargs(func)
        state = _function_getstate(func)
        return (types.FunctionType, newargs, state, None, None,
                _function_setstate)

    def _function_reduce(self, obj):
        """Reducer for function objects.

        If obj is a top-level attribute of a file-backed module, this
        reducer returns NotImplemented, making the CloudPickler fallback to
        traditional _pickle.Pickler routines to save obj. Otherwise, it
        reduces obj using a custom cloudpickle reducer designed specifically
        to handle dynamic functions.

        As opposed to cloudpickle.py, there is no special handling for
        builtin pypy functions because cloudpickle_fast is CPython-specific.
        """
        if _is_importable(obj):
            return NotImplemented
        else:
            return self._dynamic_function_reduce(obj)

    def _function_getnewargs(self, func):
        """Build the FunctionType constructor arguments for *func*."""
        code = func.__code__

        # base_globals represents the future global namespace of func at
        # unpickling time. Looking it up and storing it in
        # CloudPickler.globals_ref allow functions sharing the same globals
        # at pickling time to also share them once unpickled, at one
        # condition: since globals_ref is an attribute of a CloudPickler
        # instance, and that a new CloudPickler is created each time
        # pickle.dump or pickle.dumps is called, functions also need to be
        # saved within the same invocation of
        # cloudpickle.dump/cloudpickle.dumps (for example:
        # cloudpickle.dumps([f1, f2])). There is no such limitation when
        # using CloudPickler.dump, as long as the multiple invocations are
        # bound to the same CloudPickler.
        base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

        if base_globals == {}:
            # Add module attributes used to resolve relative imports
            # instructions inside func.
            for k in ["__package__", "__name__", "__path__", "__file__"]:
                if k in func.__globals__:
                    base_globals[k] = func.__globals__[k]

        # Do not bind the free variables before the function is created to
        # avoid infinite recursion.
        if func.__closure__ is None:
            closure = None
        else:
            closure = tuple(
                _make_empty_cell() for _ in range(len(code.co_freevars)))

        return code, base_globals, None, None, closure

    def dump(self, obj):
        """Pickle *obj*, mapping deep-recursion RuntimeError to PicklingError."""
        try:
            return Pickler.dump(self, obj)
        except RuntimeError as e:
            if "recursion" in e.args[0]:
                msg = (
                    "Could not pickle object as excessively deep recursion "
                    "required."
                )
                raise pickle.PicklingError(msg) from e
            else:
                raise

    if pickle.HIGHEST_PROTOCOL >= 5:
        # `CloudPickler.dispatch` is only left for backward compatibility -
        # note that when using protocol 5, `CloudPickler.dispatch` is not an
        # extension of `Pickler.dispatch` dictionary, because CloudPickler
        # subclasses the C-implemented Pickler, which does not expose a
        # `dispatch` attribute. Earlier versions of the protocol 5
        # CloudPickler used `CloudPickler.dispatch` as a class-level
        # attribute storing all reducers implemented by cloudpickle, but the
        # attribute name was not a great choice given the meaning of
        # `CloudPickler.dispatch` when `CloudPickler` extends the
        # pure-python pickler.
        dispatch = dispatch_table

        def __init__(self, file, protocol=None, buffer_callback=None):
            if protocol is None:
                protocol = DEFAULT_PROTOCOL
            Pickler.__init__(
                self, file, protocol=protocol, buffer_callback=buffer_callback
            )
            # map functions __globals__ attribute ids, to ensure that
            # functions sharing the same global namespace at pickling time
            # also share their global namespace at unpickling time.
            self.globals_ref = {}
            self.proto = int(protocol)

        # Implementation of the reducer_override callback, in order to
        # efficiently serialize dynamic functions and classes by subclassing
        # the C-implemented Pickler.
        # TODO: decorrelate reducer_override (which is tied to CPython's
        # implementation - would it make sense to backport it to pypy? - and
        # pickle's protocol 5 which is implementation agnostic. Currently,
        # the availability of both notions coincide on CPython's pickle and
        # the pickle5 backport, but it may not be the case anymore when pypy
        # implements protocol 5
        def reducer_override(self, obj):
            """Type-agnostic reducing callback for function and classes.

            For performance reasons, subclasses of the C _pickle.Pickler
            class cannot register custom reducers for functions and classes
            in the dispatch_table. Reducers for such types must instead be
            implemented in the special reducer_override method.

            Note that this method will be called for any object except a few
            builtin-types (int, lists, dicts etc.), which differs from
            reducers in the Pickler's dispatch_table, each of them being
            invoked for objects of a specific type only.

            This property comes in handy for classes: although most classes
            are instances of the ``type`` metaclass, some of them can be
            instances of other custom metaclasses (such as enum.EnumMeta for
            example). In particular, the metaclass will likely not be known
            in advance, and thus cannot be special-cased using an entry in
            the dispatch_table.

            reducer_override, among other things, allows us to register a
            reducer that will be called for any class, independently of its
            type.

            Notes:

            * reducer_override has the priority over dispatch_table-registered
              reducers.
            * reducer_override can be used to fix other limitations of
              cloudpickle for other types that suffered from type-specific
              reducers, such as Exceptions. See
              https://github.com/cloudpipe/cloudpickle/issues/248
            """
            if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj):  # noqa  # pragma: no branch
                return (
                    _create_parametrized_type_hint,
                    parametrized_type_hint_getinitargs(obj)
                )
            t = type(obj)
            try:
                is_anyclass = issubclass(t, type)
            except TypeError:  # t is not a class (old Boost; see SF #502085)
                is_anyclass = False

            if is_anyclass:
                return _class_reduce(obj)
            elif isinstance(obj, types.FunctionType):
                return self._function_reduce(obj)
            else:
                # fallback to save_global, including the Pickler's
                # dispatch_table
                return NotImplemented

    else:
        # When reducer_override is not available, hack the pure-Python
        # Pickler's types.FunctionType and type savers. Note: the type saver
        # must override Pickler.save_global, because pickle.py contains a
        # hard-coded call to save_global when pickling meta-classes.
        dispatch = Pickler.dispatch.copy()

        def __init__(self, file, protocol=None):
            if protocol is None:
                protocol = DEFAULT_PROTOCOL
            Pickler.__init__(self, file, protocol=protocol)
            # map functions __globals__ attribute ids, to ensure that
            # functions sharing the same global namespace at pickling time
            # also share their global namespace at unpickling time.
            self.globals_ref = {}
            # The pure-Python Pickler.__init__ is expected to have set
            # self.proto from the protocol argument.
            assert hasattr(self, 'proto')

        def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
                                 dictitems=None, state_setter=None, obj=None):
            """Emulate protocol 5's state_setter reduce semantics on <=4."""
            save = self.save
            write = self.write
            self.save_reduce(
                func, args, state=None, listitems=listitems,
                dictitems=dictitems, obj=obj
            )
            # backport of the Python 3.8 state_setter pickle operations
            save(state_setter)
            save(obj)  # simple BINGET opcode as obj is already memoized.
            save(state)
            write(pickle.TUPLE2)
            # Trigger a state_setter(obj, state) function call.
            write(pickle.REDUCE)
            # The purpose of state_setter is to carry-out an
            # inplace modification of obj. We do not care about what the
            # method might return, so its output is eventually removed from
            # the stack.
            write(pickle.POP)

        def save_global(self, obj, name=None, pack=struct.pack):
            """
            Save a "global".

            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)

            if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj):  # noqa  # pragma: no branch
                # Parametrized typing constructs in Python < 3.7 are not
                # compatible with type checks and ``isinstance`` semantics.
                # For this reason, it is easier to detect them using a
                # duck-typing-based check (``_is_parametrized_type_hint``)
                # than to populate the Pickler's dispatch with type-specific
                # savers.
                self.save_reduce(
                    _create_parametrized_type_hint,
                    parametrized_type_hint_getinitargs(obj),
                    obj=obj
                )
            elif name is not None:
                Pickler.save_global(self, obj, name=name)
            elif not _is_importable(obj, name=name):
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj),
                                          obj=obj)
            else:
                Pickler.save_global(self, obj, name=name)
        dispatch[type] = save_global

        def save_function(self, obj, name=None):
            """ Registered with the dispatch to handle all function types.

            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc) and handles the pickling appropriately.
            """
            if _is_importable(obj, name=name):
                return Pickler.save_global(self, obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                return self.save_pypy_builtin_func(obj)
            else:
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )

        def save_pypy_builtin_func(self, obj):
            """Save pypy equivalent of builtin functions.

            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by
            attribute. But PyPy has flaky support for __qualname__, so some
            builtin functions such as float.__new__ will be classified as
            dynamic. For this reason only, we created this special routine.
            Because builtin-functions are not expected to have closure or
            globals, there is no additional hack (compared the one already
            implemented in pickle) to protect ourselves from reference
            cycles. A simple (reconstructor, newargs, obj.__dict__) tuple is
            save_reduced. Note also that PyPy improved their support for
            __qualname__ in v3.6, so this routine should be removed when
            cloudpickle supports only PyPy 3.6 and later.
            """
            rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
                                       obj.__defaults__, obj.__closure__),
                  obj.__dict__)
            self.save_reduce(*rv, obj=obj)

        dispatch[types.FunctionType] = save_function

View file

@ -0,0 +1,13 @@
import sys
if sys.version_info < (3, 8):
try:
import pickle5 as pickle # noqa: F401
from pickle5 import Pickler # noqa: F401
except ImportError:
import pickle # noqa: F401
from pickle import _Pickler as Pickler # noqa: F401
else:
import pickle # noqa: F401
from _pickle import Pickler # noqa: F401

View file

@ -0,0 +1,25 @@
r"""The :mod:`loky` module manages a pool of workers that can be re-used across time.
It provides a robust and dynamic implementation of the
:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
hide the pool management under the hood.
"""
from ._base import Executor, Future
from ._base import wait, as_completed
from ._base import TimeoutError, CancelledError
from ._base import ALL_COMPLETED, FIRST_COMPLETED, FIRST_EXCEPTION
from .backend.context import cpu_count
from .backend.reduction import set_loky_pickler
from .reusable_executor import get_reusable_executor
from .cloudpickle_wrapper import wrap_non_picklable_objects
from .process_executor import BrokenProcessPool, ProcessPoolExecutor
__all__ = ["get_reusable_executor", "cpu_count", "wait", "as_completed",
"Future", "Executor", "ProcessPoolExecutor",
"BrokenProcessPool", "CancelledError", "TimeoutError",
"FIRST_COMPLETED", "FIRST_EXCEPTION", "ALL_COMPLETED",
"wrap_non_picklable_objects", "set_loky_pickler"]
__version__ = '2.9.0'

View file

@ -0,0 +1,627 @@
###############################################################################
# Backport concurrent.futures for python2.7/3.3
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from concurrent/futures/_base.py (17/02/2017)
# * Do not use yield from
# * Use old super syntax
#
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import sys
import time
import logging
import threading
import collections
if sys.version_info[:2] >= (3, 3):
from concurrent.futures import wait, as_completed
from concurrent.futures import TimeoutError, CancelledError
from concurrent.futures import Executor, Future as _BaseFuture
from concurrent.futures import FIRST_EXCEPTION
from concurrent.futures import ALL_COMPLETED, FIRST_COMPLETED
from concurrent.futures._base import LOGGER
from concurrent.futures._base import PENDING, RUNNING, CANCELLED
from concurrent.futures._base import CANCELLED_AND_NOTIFIED, FINISHED
else:
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
    """Root of the exception hierarchy used by this futures backport."""
    pass
class CancelledError(Error):
    """Raised when the Future was cancelled before completing."""
    pass
class TimeoutError(Error):
    """Raised when an operation did not finish within its deadline."""
    pass
class _Waiter(object):
    """Provides the event that wait() and as_completed() block on."""

    def __init__(self):
        # Set by the add_* callbacks once the wake-up condition is reached.
        self.event = threading.Event()
        # Futures that completed since this waiter was installed.
        self.finished_futures = []

    def add_result(self, future):
        # Record a future that finished with a result.
        self.finished_futures.append(future)

    def add_exception(self, future):
        # Record a future that finished by raising an exception.
        self.finished_futures.append(future)

    def add_cancelled(self, future):
        # Record a future whose cancellation was acknowledged.
        self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
    """Used by as_completed()."""

    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        # Protects finished_futures: as_completed() swaps the list out while
        # completing futures keep appending to it concurrently.
        self.lock = threading.Lock()

    def add_result(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_result(future)
            self.event.set()

    def add_exception(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_exception(future)
            self.event.set()

    def add_cancelled(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_cancelled(future)
            self.event.set()
class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED)."""

    # Each callback wakes the waiter as soon as any single future completes.

    def add_result(self, future):
        super(_FirstCompletedWaiter, self).add_result(future)
        self.event.set()

    def add_exception(self, future):
        super(_FirstCompletedWaiter, self).add_exception(future)
        self.event.set()

    def add_cancelled(self, future):
        super(_FirstCompletedWaiter, self).add_cancelled(future)
        self.event.set()
class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""

    def __init__(self, num_pending_calls, stop_on_exception):
        self.num_pending_calls = num_pending_calls
        self.stop_on_exception = stop_on_exception
        # Serializes decrements of the pending-call counter.
        self.lock = threading.Lock()
        super(_AllCompletedWaiter, self).__init__()

    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if self.num_pending_calls == 0:
                # All awaited futures completed: wake the waiter.
                self.event.set()

    def add_result(self, future):
        super(_AllCompletedWaiter, self).add_result(future)
        self._decrement_pending_calls()

    def add_exception(self, future):
        super(_AllCompletedWaiter, self).add_exception(future)
        if not self.stop_on_exception:
            self._decrement_pending_calls()
        else:
            # FIRST_EXCEPTION mode: any exception ends the wait immediately.
            self.event.set()

    def add_cancelled(self, future):
        super(_AllCompletedWaiter, self).add_cancelled(future)
        self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions.
"""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Build the waiter matching *return_when* and attach it to each future."""
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    elif return_when in (FIRST_EXCEPTION, ALL_COMPLETED):
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED]
            for f in fs)
        waiter = _AllCompletedWaiter(
            pending_count,
            stop_on_exception=(return_when == FIRST_EXCEPTION))
    else:
        raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different
            Executors) to iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete
        (finished or cancelled). If any given Futures are duplicated, they
        will be returned once.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()

    fs = set(fs)
    # Take a consistent snapshot of which futures are already done while
    # holding every future's condition.
    with _AcquireFutures(fs):
        finished = set(
            f for f in fs
            if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)

    try:
        for future in finished:
            yield future

        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError('%d (of %d) futures unfinished' % (
                        len(pending), len(fs)))

            waiter.event.wait(wait_timeout)

            # Swap out the list of newly finished futures under the waiter's
            # lock so concurrent completions are not lost.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            for future in finished:
                yield future
                pending.remove(future)

    finally:
        # Detach the waiter from every future, even on timeout or when the
        # consumer abandons the generator early.
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
# Return type of wait(): the futures already done, and those still pending.
DoneAndNotDoneFutures = collections.namedtuple(
    'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different
            Executors) to wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The
            options are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED -   Return when all futures finish or are
                              cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        # Fast paths: the requested condition may already hold, in which
        # case no waiter needs to be installed at all.
        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    # Always detach the waiter, whether the event fired or we timed out.
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class _BaseFuture(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<%s at %#x state=%s raised %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<%s at %#x state=%s returned %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<%s at %#x state=%s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future was cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing.
"""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The
callable will always be called by a thread in the same
process in which it was added. If the future has already
completed or been cancelled then the callable will be
called immediately. These callables are called in the order
that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the
future isn't done. If None, then there is no limit on the
wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the
given timeout.
Exception: If the call raised then that exception will be
raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
    def exception(self, timeout=None):
        """Return the exception raised by the call that the future
        represents.
        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the
                wait time.
        Returns:
            The exception raised by the call that the future represents or
            None if the call completed without raising.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the
                given timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            # Not done yet: block until notified or the timeout elapses.
            self._condition.wait(timeout)
            # Re-check the state: Condition.wait can also return on timeout.
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            else:
                raise TimeoutError()
    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.
        Should only be used by Executor implementations and unit tests.
        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (though
        calls to as_completed() or wait()) are notified and False is
        returned.
        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.
        This method should be called by Executor implementations before
        executing the work associated with this future. If this method
        returns False then the work should not be executed.
        Returns:
            False if the Future was cancelled, True otherwise.
        Raises:
            RuntimeError: if this method was already called or if
                set_result() or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                # Already RUNNING or FINISHED: the caller misused the API.
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
    """Abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Schedule fn(*args, **kwargs) for execution.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, **kwargs):
        """Return an iterator equivalent to map(fn, *iterables).

        Args:
            fn: A callable taking as many arguments as there are iterables.
            timeout: Maximum number of seconds to wait. None means no limit.
            chunksize: Only used by ProcessPoolExecutor; ignored here.

        Returns:
            An iterator over the results; the underlying calls may be
            evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be
                generated before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        timeout = kwargs.get('timeout')
        end_time = None if timeout is None else timeout + time.time()
        futures = [self.submit(fn, *args) for args in zip(*iterables)]

        # The yield is hidden inside a closure so that every submit() above
        # happens before the caller pulls the first value.
        def _iter_results():
            try:
                for fut in futures:
                    if end_time is None:
                        yield fut.result()
                    else:
                        yield fut.result(end_time - time.time())
            finally:
                # Cancel whatever has not run yet on exhaustion or error.
                for fut in futures:
                    fut.cancel()

        return _iter_results()

    def shutdown(self, wait=True):
        """Clean up the resources associated with the Executor.

        Safe to call several times; otherwise no other method may be
        called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by
                the executor have been reclaimed.
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
# To make loky._base.Future instances awaitable by concurrent.futures.wait,
# derive our custom Future class from _BaseFuture. _invoke_callback is the only
# modification made to this class in loky.
class Future(_BaseFuture):
    def _invoke_callbacks(self):
        # Run every registered done-callback; a failing callback is logged
        # and never propagated to the caller.
        for done_cb in self._done_callbacks:
            try:
                done_cb(self)
            except BaseException:
                LOGGER.exception('exception calling callback for %r', self)

View file

@ -0,0 +1,16 @@
# Entry point of the loky backend: exposes ``get_context`` and, on Python 3,
# makes multiprocessing semaphore names recognizably loky-owned.
import os
import sys
from .context import get_context
if sys.version_info > (3, 4):
    def _make_name():
        # Embed the pid so names from different processes cannot collide;
        # the random suffix comes from multiprocessing's own generator.
        name = '/loky-%i-%s' % (os.getpid(), next(synchronize.SemLock._rand))
        return name
    # monkey patch the name creation for multiprocessing
    from multiprocessing import synchronize
    synchronize.SemLock._make_name = staticmethod(_make_name)
__all__ = ["get_context"]

View file

@ -0,0 +1,76 @@
###############################################################################
# Extra reducers for Unix based system and connections objects
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Add adapted reduction for LokyProcesses and socket/Connection
#
import os
import sys
import socket
import _socket
from .reduction import register
from .context import get_spawning_popen
# ``Connection`` moved into multiprocessing.connection in Python 3.3.
if sys.version_info >= (3, 3):
    from multiprocessing.connection import Connection
else:
    from _multiprocessing import Connection
# True when this platform can send file descriptors over AF_UNIX sockets.
HAVE_SEND_HANDLE = (hasattr(socket, 'CMSG_LEN') and
                    hasattr(socket, 'SCM_RIGHTS') and
                    hasattr(socket.socket, 'sendmsg'))
def _mk_inheritable(fd):
    # Python 3.4+ creates fds non-inheritable by default (PEP 446); undo
    # that so a spawned child can use the descriptor.
    if sys.version_info[:2] > (3, 3):
        os.set_inheritable(fd, True)
    return fd
def DupFd(fd):
    '''Return a wrapper for an fd.'''
    # While spawning a child, delegate duplication to the Popen object;
    # otherwise fall back to multiprocessing's resource sharer when usable.
    popen_obj = get_spawning_popen()
    if popen_obj is not None:
        return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
    elif HAVE_SEND_HANDLE and sys.version_info[:2] > (3, 3):
        from multiprocessing import resource_sharer
        return resource_sharer.DupFd(fd)
    else:
        raise TypeError(
            'Cannot pickle connection object. This object can only be '
            'passed when spawning a new process'
        )
if sys.version_info[:2] != (3, 3):
    def _reduce_socket(s):
        # Pickle a socket as (rebuild, (duplicated-fd wrapper, family,
        # type, proto)).
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.fromfd(fd, family, type, proto)
else:
    from multiprocessing.reduction import reduce_socket as _reduce_socket
register(socket.socket, _reduce_socket)
register(_socket.socket, _reduce_socket)
if sys.version_info[:2] != (3, 3):
    def reduce_connection(conn):
        df = DupFd(conn.fileno())
        return rebuild_connection, (df, conn.readable, conn.writable)
    def rebuild_connection(df, readable, writable):
        fd = df.detach()
        return Connection(fd, readable, writable)
else:
    from multiprocessing.reduction import reduce_connection
register(Connection, reduce_connection)

View file

@ -0,0 +1,105 @@
###############################################################################
# Compat for wait function on UNIX based system
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/connection.py (17/02/2017)
# * Backport wait function to python2.7
#
import platform
import select
import socket
import errno
SYSTEM = platform.system()
try:
    import ctypes
except ImportError:  # pragma: no cover
    ctypes = None  # noqa
if SYSTEM == 'Darwin' and ctypes is not None:
    # macOS: build a monotonic clock from mach_absolute_time, converted to
    # seconds via AbsoluteToNanoseconds.
    from ctypes.util import find_library
    libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
    CoreServices = ctypes.CDLL(find_library('CoreServices'),
                               use_errno=True)
    mach_absolute_time = libSystem.mach_absolute_time
    mach_absolute_time.restype = ctypes.c_uint64
    absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
    absolute_to_nanoseconds.restype = ctypes.c_uint64
    absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]
    def monotonic():
        # Monotonic time in seconds.
        return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9
elif SYSTEM == 'Linux' and ctypes is not None:
    # from stackoverflow:
    # questions/1205722/how-do-i-get-monotonic-time-durations-in-python
    import ctypes
    import os
    CLOCK_MONOTONIC = 1  # see <linux/time.h>
    class timespec(ctypes.Structure):
        _fields_ = [
            ('tv_sec', ctypes.c_long),
            ('tv_nsec', ctypes.c_long),
        ]
    librt = ctypes.CDLL('librt.so.1', use_errno=True)
    clock_gettime = librt.clock_gettime
    clock_gettime.argtypes = [
        ctypes.c_int, ctypes.POINTER(timespec),
    ]
    def monotonic():  # noqa
        t = timespec()
        if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
            errno_ = ctypes.get_errno()
            raise OSError(errno_, os.strerror(errno_))
        return t.tv_sec + t.tv_nsec * 1e-9
else:  # pragma: no cover
    # Fallback: wall-clock time. NOT monotonic — may jump backwards.
    from time import time as monotonic
if hasattr(select, 'poll'):
    def _poll(fds, timeout):
        # Wait for readability using select.poll; *fds* may be raw integer
        # fds or objects exposing fileno().
        if timeout is not None:
            timeout = int(timeout * 1000)  # timeout is in milliseconds
        fd_map = {}
        pollster = select.poll()
        for fd in fds:
            pollster.register(fd, select.POLLIN)
            if hasattr(fd, 'fileno'):
                fd_map[fd.fileno()] = fd
            else:
                fd_map[fd] = fd
        ls = []
        for fd, event in pollster.poll(timeout):
            if event & select.POLLNVAL:  # pragma: no cover
                raise ValueError('invalid file descriptor %i' % fd)
            ls.append(fd_map[fd])
        return ls
else:
    def _poll(fds, timeout):
        return select.select(fds, [], [], timeout)[0]
def wait(object_list, timeout=None):
    '''
    Wait till an object in object_list is ready/readable.
    Returns list of those objects which are ready/readable.
    '''
    if timeout is not None:
        if timeout <= 0:
            return _poll(object_list, 0)
        else:
            deadline = monotonic() + timeout
    while True:
        try:
            return _poll(object_list, timeout)
        except (OSError, IOError, socket.error) as e:  # pragma: no cover
            # Retry when interrupted by a signal (EINTR); recompute the
            # remaining timeout against the original deadline below.
            if e.errno != errno.EINTR:
                raise
        if timeout is not None:
            timeout = deadline - monotonic()

View file

@ -0,0 +1,99 @@
###############################################################################
# Extra reducers for Windows system and connections objects
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Add adapted reduction for LokyProcesses and socket/PipeConnection
#
import os
import sys
import socket
from .reduction import register
if sys.platform == 'win32':
    # PipeConnection moved into multiprocessing.connection in Python 3.3.
    if sys.version_info[:2] < (3, 3):
        from _multiprocessing import PipeConnection
    else:
        import _winapi
        from multiprocessing.connection import PipeConnection
if sys.version_info[:2] >= (3, 4) and sys.platform == 'win32':
    class DupHandle(object):
        # Wrapper duplicating a Windows handle into the target process so
        # the unpickling side can detach it there.
        def __init__(self, handle, access, pid=None):
            # duplicate handle for process with given pid
            if pid is None:
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid
        def detach(self):
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                return self._handle
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                # DUPLICATE_CLOSE_SOURCE also closes the owner's copy.
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)
    def reduce_pipe_connection(conn):
        # Request only the access rights the connection actually uses.
        access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
                  (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
        dh = DupHandle(conn.fileno(), access)
        return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
    def rebuild_pipe_connection(dh, readable, writable):
        from multiprocessing.connection import PipeConnection
        handle = dh.detach()
        return PipeConnection(handle, readable, writable)
    register(PipeConnection, reduce_pipe_connection)
elif sys.platform == 'win32':
    # Older Python versions
    from multiprocessing.reduction import reduce_pipe_connection
    register(PipeConnection, reduce_pipe_connection)
if sys.version_info[:2] < (3, 3) and sys.platform == 'win32':
    from _multiprocessing import win32
    from multiprocessing.reduction import reduce_handle, rebuild_handle
    close = win32.CloseHandle
    def fromfd(handle, family, type_, proto=0):
        s = socket.socket(family, type_, proto, fileno=handle)
        if s.__class__ is not socket.socket:
            s = socket.socket(_sock=s)
        return s
    def reduce_socket(s):
        if not hasattr(socket, "fromfd"):
            raise TypeError("sockets cannot be pickled on this system.")
        reduced_handle = reduce_handle(s.fileno())
        return _rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
    def _rebuild_socket(reduced_handle, family, type_, proto):
        handle = rebuild_handle(reduced_handle)
        s = fromfd(handle, family, type_, proto)
        close(handle)
        return s
    register(socket.socket, reduce_socket)
elif sys.version_info[:2] < (3, 4):
    from multiprocessing.reduction import reduce_socket
    register(socket.socket, reduce_socket)
else:
    from multiprocessing.reduction import _reduce_socket
    register(socket.socket, _reduce_socket)

View file

@ -0,0 +1,58 @@
###############################################################################
# Compat for wait function on Windows system
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/connection.py (17/02/2017)
# * Backport wait function to python2.7
#
import ctypes
import sys
from time import sleep
if sys.platform == 'win32' and sys.version_info[:2] < (3, 3):
    from _subprocess import WaitForSingleObject, WAIT_OBJECT_0
try:
    from time import monotonic
except ImportError:
    # Backward old for crappy old Python that did not have cross-platform
    # monotonic clock by default.
    # TODO: do we want to add support for cygwin at some point? See:
    # https://github.com/atdt/monotonic/blob/master/monotonic.py
    GetTickCount64 = ctypes.windll.kernel32.GetTickCount64
    GetTickCount64.restype = ctypes.c_ulonglong
    def monotonic():
        """Monotonic clock, cannot go backward."""
        return GetTickCount64() / 1000.0
def wait(handles, timeout=None):
    """Backward compat for python2.7
    This function wait for either:
    * one connection is ready for read,
    * one process handle has exited or got killed,
    * timeout is reached. Note that this function has a precision of 2
      msec.
    """
    if timeout is not None:
        deadline = monotonic() + timeout
    while True:
        # We cannot use select as in windows it only support sockets
        ready = []
        for h in handles:
            # NOTE(review): ``long`` only exists on Python 2 — this module
            # appears intended for 2.7/win32 only; confirm import guards.
            if type(h) in [int, long]:
                if WaitForSingleObject(h, 0) == WAIT_OBJECT_0:
                    ready += [h]
            elif h.poll(0):
                ready.append(h)
        if len(ready) > 0:
            return ready
        sleep(.001)
        if timeout is not None and deadline - monotonic() <= 0:
            return []

View file

@ -0,0 +1,41 @@
###############################################################################
# Compat file to import the correct modules for each platform and python
# version.
#
# author: Thomas Moreau and Olivier grisel
#
import sys
# True on interpreters where the Python 3.3+ APIs are available.
PY3 = sys.version_info[:2] >= (3, 3)
if PY3:
    import queue
else:
    import Queue as queue
if sys.version_info >= (3, 4):
    from multiprocessing.process import BaseProcess
else:
    from multiprocessing.process import Process as BaseProcess
# Platform specific compat
if sys.platform == "win32":
    from .compat_win32 import wait
else:
    from .compat_posix import wait
def set_cause(exc, cause):
    # Attach *cause* to *exc* (like ``raise exc from cause``); on Python 2,
    # where __cause__ is not rendered, fold it into the message instead.
    exc.__cause__ = cause
    if not PY3:
        # Preformat message here.
        if exc.__cause__ is not None:
            exc.args = ("{}\n\nThis was caused directly by {}".format(
                exc.args if len(exc.args) != 1 else exc.args[0],
                str(exc.__cause__)),)
    return exc
__all__ = ["queue", "BaseProcess", "set_cause", "wait"]

View file

@ -0,0 +1,13 @@
# flake8: noqa
###############################################################################
# Compat file to load the correct wait function
#
# author: Thomas Moreau and Olivier grisel
#
import sys
# Compat wait
if sys.version_info < (3, 3):
    # multiprocessing.connection.wait does not exist before 3.3; use the
    # poll/select based backport shipped with loky.
    from ._posix_wait import wait
else:
    from multiprocessing.connection import wait

View file

@ -0,0 +1,46 @@
# flake8: noqa: F401
import sys
import numbers
if sys.platform == "win32":
    # Avoid import error by code introspection tools such as test runners
    # trying to import this module while running on non-Windows systems.
    # Compat Popen
    if sys.version_info[:2] >= (3, 4):
        from multiprocessing.popen_spawn_win32 import Popen
    else:
        from multiprocessing.forking import Popen
    # wait compat
    if sys.version_info[:2] < (3, 3):
        from ._win_wait import wait
    else:
        from multiprocessing.connection import wait
    # Compat _winapi
    if sys.version_info[:2] >= (3, 4):
        import _winapi
    else:
        import os
        import msvcrt
        if sys.version_info[:2] < (3, 3):
            import _subprocess as win_api
            from _multiprocessing import win32
        else:
            import _winapi as win_api
        class _winapi:
            # Minimal shim exposing the _winapi entry points loky needs on
            # interpreters older than 3.4.
            CreateProcess = win_api.CreateProcess
            @staticmethod
            def CloseHandle(h):
                if isinstance(h, numbers.Integral):
                    # Cast long to int for 64-bit Python 2.7 under Windows
                    h = int(h)
                if sys.version_info[:2] < (3, 3):
                    if not isinstance(h, int):
                        h = h.Detach()
                    win32.CloseHandle(h)
                else:
                    win_api.CloseHandle(h)

View file

@ -0,0 +1,367 @@
###############################################################################
# Basic context management with LokyContext and provides
# compat for UNIX 2.7 and 3.3
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/context.py
# * Create a context ensuring loky uses only objects that are compatible
# * Add LokyContext to the list of context of multiprocessing so loky can be
# used with multiprocessing.set_start_method
# * Add some compat function for python2.7 and 3.3.
#
from __future__ import division
import os
import sys
import subprocess
import traceback
import warnings
import multiprocessing as mp
from .process import LokyProcess, LokyInitMainProcess
# Start methods provided by loky itself; extended below with the native
# multiprocessing methods available on this interpreter / platform.
START_METHODS = ['loky', 'loky_init_main']
_DEFAULT_START_METHOD = None
# Cache for the number of physical cores to avoid repeating subprocess calls.
# It should not change during the lifetime of the program.
physical_cores_cache = None
if sys.version_info[:2] >= (3, 4):
    from multiprocessing import get_context as mp_get_context
    from multiprocessing.context import assert_spawning, set_spawning_popen
    from multiprocessing.context import get_spawning_popen, BaseContext
    START_METHODS += ['spawn']
    if sys.platform != 'win32':
        START_METHODS += ['fork', 'forkserver']
    def get_context(method=None):
        # Try to overload the default context
        method = method or _DEFAULT_START_METHOD or "loky"
        if method == "fork":
            # If 'fork' is explicitly requested, warn user about potential
            # issues.
            warnings.warn("`fork` start method should not be used with "
                          "`loky` as it does not respect POSIX. Try using "
                          "`spawn` or `loky` instead.", UserWarning)
        try:
            context = mp_get_context(method)
        except ValueError:
            raise ValueError("Unknown context '{}'. Value should be in {}."
                             .format(method, START_METHODS))
        return context
else:
    # Python 2.7 / 3.3: re-implement just enough of multiprocessing.context.
    if sys.platform != 'win32':
        import threading
        # Mechanism to check that the current thread is spawning a process
        _tls = threading.local()
        popen_attr = 'spawning_popen'
    else:
        from multiprocessing.forking import Popen
        _tls = Popen._tls
        popen_attr = 'process_handle'
    BaseContext = object
    def get_spawning_popen():
        return getattr(_tls, popen_attr, None)
    def set_spawning_popen(popen):
        setattr(_tls, popen_attr, popen)
    def assert_spawning(obj):
        if get_spawning_popen() is None:
            raise RuntimeError(
                '%s objects should only be shared between processes'
                ' through inheritance' % type(obj).__name__
            )
    def get_context(method=None):
        method = method or _DEFAULT_START_METHOD or 'loky'
        if method == "loky":
            return LokyContext()
        elif method == "loky_init_main":
            return LokyInitMainContext()
        else:
            raise ValueError("Unknown context '{}'. Value should be in {}."
                             .format(method, START_METHODS))
def set_start_method(method, force=False):
    """Force the start method returned by ``get_context``.

    Raises RuntimeError when a method was already set and *force* is False.
    """
    global _DEFAULT_START_METHOD
    if not force and _DEFAULT_START_METHOD is not None:
        raise RuntimeError('context has already been set')
    assert method is None or method in START_METHODS, (
        "'{}' is not a valid start_method. It should be in {}"
        .format(method, START_METHODS))
    _DEFAULT_START_METHOD = method
def get_start_method():
    """Return the start method set via ``set_start_method`` (None if unset)."""
    return _DEFAULT_START_METHOD
def cpu_count(only_physical_cores=False):
    """Return the number of CPUs the current process can use.
    The returned number of CPUs accounts for:
     * the number of CPUs in the system, as given by
       ``multiprocessing.cpu_count``;
     * the CPU affinity settings of the current process
       (available with Python 3.4+ on some Unix systems);
     * CFS scheduler CPU bandwidth limit (available on Linux only, typically
       set by docker and similar container orchestration systems);
     * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
    and is given as the minimum of these constraints.
    If ``only_physical_cores`` is True, return the number of physical cores
    instead of the number of logical cores (hyperthreading / SMT). Note that
    this option is not enforced if the number of usable cores is controlled in
    any other way such as: process affinity, restricting CFS scheduler policy
    or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
    cores is not found, return the number of logical cores.
    It is also always larger or equal to 1.
    """
    # TODO: use os.cpu_count when dropping python 2 support
    try:
        cpu_count_mp = mp.cpu_count()
    except NotImplementedError:
        cpu_count_mp = 1
    cpu_count_user = _cpu_count_user(cpu_count_mp)
    aggregate_cpu_count = min(cpu_count_mp, cpu_count_user)
    if only_physical_cores:
        cpu_count_physical, exception = _count_physical_cores()
        if cpu_count_user < cpu_count_mp:
            # Respect user setting
            cpu_count = max(cpu_count_user, 1)
        elif cpu_count_physical == "not found":
            # Fallback to default behavior
            if exception is not None:
                # warns only the first time
                warnings.warn(
                    "Could not find the number of physical cores for the "
                    "following reason:\n" + str(exception) + "\n"
                    "Returning the number of logical cores instead. You can "
                    "silence this warning by setting LOKY_MAX_CPU_COUNT to "
                    "the number of cores you want to use.")
                if sys.version_info >= (3, 5):
                    # TODO remove the version check when dropping py2 support
                    traceback.print_tb(exception.__traceback__)
            cpu_count = max(aggregate_cpu_count, 1)
        else:
            return cpu_count_physical
    else:
        cpu_count = max(aggregate_cpu_count, 1)
    return cpu_count
def _cpu_count_user(cpu_count_mp):
    """Number of user defined available CPUs"""
    import math
    # Number of available CPUs given affinity settings
    cpu_count_affinity = cpu_count_mp
    if hasattr(os, 'sched_getaffinity'):
        try:
            cpu_count_affinity = len(os.sched_getaffinity(0))
        except NotImplementedError:
            pass
    # CFS scheduler CPU bandwidth limit
    # available in Linux since 2.6 kernel
    # NOTE(review): only the cgroup v1 paths are checked; cgroup v2 exposes
    # cpu.max instead — confirm whether v2 support is intended here.
    cpu_count_cfs = cpu_count_mp
    cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
    cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
    if os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
        with open(cfs_quota_fname, 'r') as fh:
            cfs_quota_us = int(fh.read())
        with open(cfs_period_fname, 'r') as fh:
            cfs_period_us = int(fh.read())
        if cfs_quota_us > 0 and cfs_period_us > 0:
            # Make sure this quantity is an int as math.ceil returns a
            # float in python2.7. (See issue #165)
            cpu_count_cfs = int(math.ceil(cfs_quota_us / cfs_period_us))
    # User defined soft-limit passed as a loky specific environment variable.
    cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', cpu_count_mp))
    return min(cpu_count_affinity, cpu_count_cfs, cpu_count_loky)
def _count_physical_cores():
    """Return a tuple (number of physical cores, exception)
    If the number of physical cores is found, exception is set to None.
    If it has not been found, return ("not found", exception).
    The number of physical cores is cached to avoid repeating subprocess calls.
    """
    exception = None
    # First check if the value is cached
    global physical_cores_cache
    if physical_cores_cache is not None:
        return physical_cores_cache, exception
    # Not cached yet, find it
    try:
        if sys.platform == "linux":
            cpu_info = subprocess.run(
                "lscpu --parse=core".split(" "), capture_output=True)
            cpu_info = cpu_info.stdout.decode("utf-8").splitlines()
            # Set comprehension deduplicates: one entry per physical core id.
            cpu_info = {line for line in cpu_info if not line.startswith("#")}
            cpu_count_physical = len(cpu_info)
        elif sys.platform == "win32":
            cpu_info = subprocess.run(
                "wmic CPU Get NumberOfCores /Format:csv".split(" "),
                capture_output=True)
            cpu_info = cpu_info.stdout.decode('utf-8').splitlines()
            cpu_info = [l.split(",")[1] for l in cpu_info
                        if (l and l != "Node,NumberOfCores")]
            cpu_count_physical = sum(map(int, cpu_info))
        elif sys.platform == "darwin":
            cpu_info = subprocess.run(
                "sysctl -n hw.physicalcpu".split(" "), capture_output=True)
            cpu_info = cpu_info.stdout.decode('utf-8')
            cpu_count_physical = int(cpu_info)
        else:
            raise NotImplementedError(
                "unsupported platform: {}".format(sys.platform))
        # if cpu_count_physical < 1, we did not find a valid value
        if cpu_count_physical < 1:
            raise ValueError(
                "found {} physical cores < 1".format(cpu_count_physical))
    except Exception as e:
        # Any failure (missing tool, parse error, unsupported platform) is
        # reported to the caller via the returned tuple instead of raised.
        exception = e
        cpu_count_physical = "not found"
    # Put the result in cache
    physical_cores_cache = cpu_count_physical
    return cpu_count_physical, exception
class LokyContext(BaseContext):
    """Context relying on the LokyProcess."""
    _name = 'loky'
    Process = LokyProcess
    cpu_count = staticmethod(cpu_count)
    def Queue(self, maxsize=0, reducers=None):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, reducers=reducers,
                     ctx=self.get_context())
    def SimpleQueue(self, reducers=None):
        '''Returns a queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(reducers=reducers, ctx=self.get_context())
    # The remaining methods are defined conditionally at class-creation
    # time, depending on the interpreter version and the platform.
    if sys.version_info[:2] < (3, 4):
        """Compat for python2.7/3.3 for necessary methods in Context"""
        def get_context(self):
            return self
        def get_start_method(self):
            return self._name
        def Pipe(self, duplex=True):
            '''Returns two connection object connected by a pipe'''
            return mp.Pipe(duplex)
        if sys.platform != "win32":
            """Use the compat Manager for python2.7/3.3 on UNIX to avoid
            relying on fork processes
            """
            def Manager(self):
                """Returns a manager object"""
                from .managers import LokyManager
                m = LokyManager()
                m.start()
                return m
        else:
            """Compat for context on Windows and python2.7/3.3. Using regular
            multiprocessing objects as it does not rely on fork.
            """
            from multiprocessing import synchronize
            Semaphore = staticmethod(synchronize.Semaphore)
            BoundedSemaphore = staticmethod(synchronize.BoundedSemaphore)
            Lock = staticmethod(synchronize.Lock)
            RLock = staticmethod(synchronize.RLock)
            Condition = staticmethod(synchronize.Condition)
            Event = staticmethod(synchronize.Event)
            Manager = staticmethod(mp.Manager)
    if sys.platform != "win32":
        """For Unix platform, use our custom implementation of synchronize
        relying on ctypes to interface with pthread semaphores.
        """
        def Semaphore(self, value=1):
            """Returns a semaphore object"""
            from .synchronize import Semaphore
            return Semaphore(value=value)
        def BoundedSemaphore(self, value):
            """Returns a bounded semaphore object"""
            from .synchronize import BoundedSemaphore
            return BoundedSemaphore(value)
        def Lock(self):
            """Returns a lock object"""
            from .synchronize import Lock
            return Lock()
        def RLock(self):
            """Returns a recurrent lock object"""
            from .synchronize import RLock
            return RLock()
        def Condition(self, lock=None):
            """Returns a condition object"""
            from .synchronize import Condition
            return Condition(lock)
        def Event(self):
            """Returns an event object"""
            from .synchronize import Event
            return Event()
# Same context as LokyContext, but with a Process class that re-imports the
# caller's __main__ module in the child (spawn-like semantics).
class LokyInitMainContext(LokyContext):
    """Extra context with LokyProcess, which does load the main module
    This context is used for compatibility in the case ``cloudpickle`` is not
    present on the running system. This permits to load functions defined in
    the ``main`` module, using proper safeguards. The declaration of the
    ``executor`` should be protected by ``if __name__ == "__main__":`` and the
    functions and variable used from main should be out of this block.
    This mimics the default behavior of multiprocessing under Windows and the
    behavior of the ``spawn`` start method on a posix system for python3.4+.
    For more details, see the end of the following section of python doc
    https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
    """
    _name = 'loky_init_main'
    Process = LokyInitMainProcess
if sys.version_info > (3, 4):
    """Register loky context so it works with multiprocessing.get_context"""
    # Note: the bare string above is a no-op statement used as a comment in
    # the original code; kept for fidelity.
    ctx_loky = LokyContext()
    mp.context._concrete_contexts['loky'] = ctx_loky
    mp.context._concrete_contexts['loky_init_main'] = LokyInitMainContext()

View file

@ -0,0 +1,48 @@
###############################################################################
# Launch a subprocess using forkexec and make sure only the needed fd are
# shared in the two process.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
# FileNotFoundError does not exist before Python 3.3; OSError is its parent.
if sys.platform == "darwin" and sys.version_info < (3, 3):
    FileNotFoundError = OSError
def close_fds(keep_fds):  # pragma: no cover
    """Close all the file descriptors except those in keep_fds."""
    # Make sure to keep stdout and stderr open for logging purpose
    keep_fds = set(keep_fds).union([1, 2])
    # We try to retrieve all the open fds
    try:
        open_fds = set(int(fd) for fd in os.listdir('/proc/self/fd'))
    except FileNotFoundError:
        # No /proc (e.g. macOS): assume anything up to the soft
        # RLIMIT_NOFILE bound may be open.
        import resource
        max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        open_fds = set(fd for fd in range(3, max_nfds))
        open_fds.add(0)
    for i in open_fds - keep_fds:
        try:
            os.close(i)
        except OSError:
            pass
def fork_exec(cmd, keep_fds, env=None):
    """Fork, close all fds except *keep_fds*, then exec ``sys.executable``.

    Returns the child pid in the parent process; the child never returns.
    """
    # copy the environment variables to set in the child process
    env = {} if env is None else env
    child_env = os.environ.copy()
    child_env.update(env)
    pid = os.fork()
    if pid == 0:  # pragma: no cover
        close_fds(keep_fds)
        os.execve(sys.executable, cmd, child_env)
    else:
        return pid

View file

@ -0,0 +1,51 @@
###############################################################################
# compat for UNIX 2.7 and 3.3
# Manager with LokyContext server.
# This avoids having a Manager using fork and breaks the fd.
#
# author: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/managers.py (17/02/2017)
# * Overload the start method to use LokyContext and launch a loky subprocess
#
import multiprocessing as mp
from multiprocessing.managers import SyncManager, State
from .process import LokyProcess as Process
class LokyManager(SyncManager):
    """SyncManager whose server runs in a LokyProcess instead of a fork."""
    def start(self, initializer=None, initargs=()):
        '''Spawn a server process for this manager object'''
        assert self._state.value == State.INITIAL
        if (initializer is not None
                and not hasattr(initializer, '__call__')):
            raise TypeError('initializer must be a callable')
        # pipe over which we will retrieve address of server
        reader, writer = mp.Pipe(duplex=False)
        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, bytes(self._authkey),
                  self._serializer, writer, initializer, initargs),
        )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()
        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()
        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = mp.util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
        )

View file

@ -0,0 +1,215 @@
###############################################################################
# Popen for LokyProcess.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
import signal
import pickle
from io import BytesIO
from . import reduction, spawn
from .context import get_spawning_popen, set_spawning_popen
from multiprocessing import util, process
if sys.version_info[:2] < (3, 3):
ProcessLookupError = OSError
if sys.platform != "win32":
from . import resource_tracker
__all__ = []
if sys.platform != "win32":
#
# Wrapper for an fd used while launching a process
#
class _DupFd(object):
    """Wrapper for an fd used while launching a process.

    Mimics the ``DupFd`` interface expected by multiprocessing
    reducers: the fd is made inheritable on creation and retrieved on
    the receiving side via ``detach()``.
    """

    def __init__(self, fd):
        # Mark the fd inheritable so it survives the exec of the child.
        self.fd = reduction._mk_inheritable(fd)

    def detach(self):
        # Return the raw fd; no duplication is performed here.
        return self.fd
#
# Start child process using subprocess.Popen
#
__all__.append('Popen')
class Popen(object):
    """Start a child process with ``fork_exec`` (POSIX loky start method).

    The parent pickles the preparation data and the process object into
    a buffer, launches a fresh Python interpreter running this module's
    ``__main__`` section, and streams the buffer to the child over a
    pipe.  A second pipe end (``parent_r``) serves as the sentinel for
    process termination.
    """
    method = 'loky'
    DupFd = _DupFd

    def __init__(self, process_obj):
        # Flush inherited streams so the child does not re-emit output
        # still buffered in the parent.
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None
        # fds to pass to the child in addition to the control pipe
        self._fds = []
        self._launch(process_obj)

    if sys.version_info < (3, 4):
        @classmethod
        def duplicate_for_child(cls, fd):
            # Python < 3.4: locate the Popen being spawned through the
            # module-level "spawning popen" pointer.
            popen = get_spawning_popen()
            popen._fds.append(fd)
            return reduction._mk_inheritable(fd)

    else:
        def duplicate_for_child(self, fd):
            # Record the fd so fork_exec keeps it open, and make it
            # inheritable across exec.
            self._fds.append(fd)
            return reduction._mk_inheritable(fd)

    def poll(self, flag=os.WNOHANG):
        """Reap the child if it has exited; return its exit code or None."""
        if self.returncode is None:
            while True:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except OSError:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                else:
                    break
            if pid == self.pid:
                # Translate the wait status: negative value for a signal,
                # plain exit status otherwise.
                if os.WIFSIGNALED(sts):
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        """Wait (optionally up to *timeout* seconds) for the child to exit."""
        if sys.version_info < (3, 3):
            # Manual polling loop: multiprocessing.connection.wait is not
            # available before Python 3.3.
            import time
            if timeout is None:
                return self.poll(0)
            deadline = time.time() + timeout
            delay = 0.0005
            while 1:
                res = self.poll()
                if res is not None:
                    break
                remaining = deadline - time.time()
                if remaining <= 0:
                    break
                # exponential backoff capped at 50ms
                delay = min(delay * 2, remaining, 0.05)
                time.sleep(delay)
            return res

        if self.returncode is None:
            if timeout is not None:
                from multiprocessing.connection import wait
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def terminate(self):
        """Send SIGTERM to the child, tolerating an already-dead process."""
        if self.returncode is None:
            try:
                os.kill(self.pid, signal.SIGTERM)
            except ProcessLookupError:
                pass
            except OSError:
                # EPERM/others: re-raise only if the child is still alive.
                if self.wait(timeout=0.1) is None:
                    raise

    def _launch(self, process_obj):
        """Serialize *process_obj*, exec a child interpreter, feed it the data."""
        tracker_fd = resource_tracker._resource_tracker.getfd()

        # Serialize first, while registered as the spawning popen so
        # reducers can call duplicate_for_child.
        fp = BytesIO()
        set_spawning_popen(self)
        try:
            prep_data = spawn.get_preparation_data(
                process_obj._name,
                getattr(process_obj, "init_main_module", True))
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)
        finally:
            set_spawning_popen(None)

        try:
            # parent_r/child_w: child -> parent sentinel pipe
            # child_r/parent_w: parent -> child pickle stream
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            # for fd in self._fds:
            #     _mk_inheritable(fd)

            cmd_python = [sys.executable]
            cmd_python += ['-m', self.__module__]
            cmd_python += ['--process-name', str(process_obj.name)]
            cmd_python += ['--pipe',
                           str(reduction._mk_inheritable(child_r))]
            reduction._mk_inheritable(child_w)
            reduction._mk_inheritable(tracker_fd)
            self._fds.extend([child_r, child_w, tracker_fd])
            if sys.version_info >= (3, 8) and os.name == 'posix':
                # Also share the stdlib resource tracker fd (3.8+).
                mp_tracker_fd = prep_data['mp_tracker_args']['fd']
                self.duplicate_for_child(mp_tracker_fd)

            from .fork_exec import fork_exec
            pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
            util.debug("launched python with pid {} and cmd:\n{}"
                       .format(pid, cmd_python))
            self.sentinel = parent_r

            # getbuffer avoids a copy; fall back to getvalue on old Pythons.
            method = 'getbuffer'
            if not hasattr(fp, method):
                method = 'getvalue'
            with os.fdopen(parent_w, 'wb') as f:
                f.write(getattr(fp, method)())
            self.pid = pid
        finally:
            # The sentinel end is closed lazily at GC/exit time; the
            # child-side ends are no longer needed in the parent.
            if parent_r is not None:
                util.Finalize(self, os.close, (parent_r,))
            for fd in (child_r, child_w):
                if fd is not None:
                    os.close(fd)

    @staticmethod
    def thread_is_spawning():
        return True
if __name__ == '__main__':
    # Entry point of the child interpreter launched by Popen._launch:
    # read the preparation data and the pickled process object from the
    # inherited pipe, then run the process bootstrap.
    import argparse
    parser = argparse.ArgumentParser('Command line parser')
    parser.add_argument('--pipe', type=int, required=True,
                        help='File handle for the pipe')
    parser.add_argument('--process-name', type=str, default=None,
                        help='Identifier for debugging purpose')

    args = parser.parse_args()

    info = dict()
    # Default to a non-zero exit code in case bootstrap fails.
    exitcode = 1
    try:
        with os.fdopen(args.pipe, 'rb') as from_parent:
            # Flag consulted by BaseProcess while unpickling.
            process.current_process()._inheriting = True
            try:
                prep_data = pickle.load(from_parent)
                spawn.prepare(prep_data)
                process_obj = pickle.load(from_parent)
            finally:
                del process.current_process()._inheriting

            exitcode = process_obj._bootstrap()
    except Exception:
        print('\n\n' + '-' * 80)
        print('{} failed with traceback: '.format(args.process_name))
        print('-' * 80)
        import traceback
        print(traceback.format_exc())
        print('\n' + '-' * 80)
    finally:
        # NOTE(review): from_parent is unbound if os.fdopen itself
        # raised above — the 'with' normally closes it already.
        if from_parent is not None:
            from_parent.close()

        sys.exit(exitcode)

View file

@ -0,0 +1,173 @@
import os
import sys
from pickle import load
from multiprocessing import process, util
from . import spawn
from . import reduction
from .context import get_spawning_popen, set_spawning_popen
if sys.platform == "win32":
# Avoid import error by code introspection tools such as test runners
# trying to import this module while running on non-Windows systems.
import msvcrt
from .compat_win32 import _winapi
from .compat_win32 import Popen as _Popen
from .reduction import duplicate
else:
_Popen = object
if sys.version_info[:2] < (3, 3):
from os import fdopen as open
__all__ = ['Popen']
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
def _path_eq(p1, p2):
return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
WINENV = (hasattr(sys, "_base_executable")
and not _path_eq(sys.executable, sys._base_executable))
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(_Popen):
    '''
    Start a subprocess to run the code of a process object (Windows).

    A read handle of an OS pipe is duplicated inheritably and passed to
    the child on its command line; the parent then pickles the
    preparation data and the process object into the write end.
    '''
    method = 'loky'

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(
            process_obj._name, getattr(process_obj, "init_main_module", True))

        # read end of pipe will be "stolen" by the child process
        # -- see spawn_main() in spawn.py.
        rfd, wfd = os.pipe()
        rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
        os.close(rfd)

        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)

        python_exe = spawn.get_executable()

        # copy the environment variables to set in the child process
        child_env = os.environ.copy()
        child_env.update(process_obj.env)

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            python_exe = sys._base_executable
            child_env["__PYVENV_LAUNCHER__"] = sys.executable

        try:
            with open(wfd, 'wb') as to_child:
                # start process
                try:
                    # This flag allows to pass inheritable handles from the
                    # parent to the child process in a python2-3 compatible way
                    # (see
                    # https://github.com/tomMoral/loky/pull/204#discussion_r290719629
                    # for more detail). When support for Python 2 is dropped,
                    # the cleaner multiprocessing.reduction.steal_handle should
                    # be used instead.
                    inherit = True
                    hp, ht, pid, tid = _winapi.CreateProcess(
                        python_exe, cmd,
                        None, None, inherit, 0,
                        child_env, None, None)
                    # The thread handle is never used; release it right away.
                    _winapi.CloseHandle(ht)
                except BaseException:
                    # The child never started: close the handle we meant
                    # to hand over, then propagate.
                    _winapi.CloseHandle(rhandle)
                    raise

                # set attributes of self
                self.pid = pid
                self.returncode = None
                self._handle = hp
                self.sentinel = int(hp)
                util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))

                # send information to child
                set_spawning_popen(self)
                if sys.version_info[:2] < (3, 4):
                    Popen._tls.process_handle = int(hp)
                try:
                    reduction.dump(prep_data, to_child)
                    reduction.dump(process_obj, to_child)
                finally:
                    set_spawning_popen(None)
                    if sys.version_info[:2] < (3, 4):
                        del Popen._tls.process_handle
        except IOError as exc:
            # IOError 22 happens when the launched subprocess terminated before
            # wfd.close is called. Thus we can safely ignore it.
            if exc.errno != 22:
                raise
            util.debug("While starting {}, ignored a IOError 22"
                       .format(process_obj._name))

    def duplicate_for_child(self, handle):
        # Only valid while this Popen is the one being spawned.
        assert self is get_spawning_popen()
        return duplicate(handle, self.sentinel)
def get_command_line(pipe_handle, **kwds):
    '''
    Returns prefix of command line used for spawning a child process.

    *pipe_handle* is the inheritable OS handle the child must read its
    pickled state from; extra keyword arguments (e.g. parent_pid) are
    accepted for call-site compatibility but ignored here.
    '''
    if getattr(sys, 'frozen', False):
        # Frozen executables re-enter through their own entry point.
        return ([sys.executable, '--multiprocessing-fork', pipe_handle])
    else:
        prog = 'from joblib.externals.loky.backend.popen_loky_win32 import main; main()'
        opts = util._args_from_interpreter_flags()
        return [spawn.get_executable()] + opts + [
            '-c', prog, '--multiprocessing-fork', pipe_handle]
def is_forking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    if len(argv) < 2 or argv[1] != '--multiprocessing-fork':
        return False
    # A forking command line carries exactly one extra argument:
    # the pipe handle.
    assert len(argv) == 3
    return True
def main():
    '''
    Run code specified by data received over pipe (Windows child entry).
    '''
    assert is_forking(sys.argv)

    # Last argv element is the inherited pipe handle (see get_command_line).
    handle = int(sys.argv[-1])
    fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    from_parent = os.fdopen(fd, 'rb')

    # Flag consulted by BaseProcess while unpickling.
    process.current_process()._inheriting = True
    preparation_data = load(from_parent)
    spawn.prepare(preparation_data)
    self = load(from_parent)
    process.current_process()._inheriting = False

    from_parent.close()

    # Run the process body and propagate its exit code.
    # NOTE(review): uses the site builtin ``exit`` rather than sys.exit.
    exitcode = self._bootstrap()
    exit(exitcode)

View file

@ -0,0 +1,108 @@
###############################################################################
# LokyProcess implementation
#
# authors: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/process.py (17/02/2017)
# * Add some compatibility function for python2.7 and 3.3
#
import os
import sys
from .compat import BaseProcess
class LokyProcess(BaseProcess):
    """Process started with the 'loky' start method (spawn-like, no fork).

    Adds an ``env`` mapping applied to the child's environment and an
    ``init_main_module`` flag controlling whether the parent's
    ``__main__`` is re-imported in the child.
    """
    _start_method = 'loky'

    def __init__(self, group=None, target=None, name=None, args=(),
                 kwargs={}, daemon=None, init_main_module=False,
                 env=None):
        if sys.version_info < (3, 3):
            # BaseProcess does not accept ``daemon`` before 3.3.
            super(LokyProcess, self).__init__(
                group=group, target=target, name=name, args=args,
                kwargs=kwargs)
            self.daemon = daemon
        else:
            super(LokyProcess, self).__init__(
                group=group, target=target, name=name, args=args,
                kwargs=kwargs, daemon=daemon)
        # extra environment variables to set in the child
        self.env = {} if env is None else env
        # Re-assign through the property so the key is rewrapped
        # (AuthenticationKey on py<3.4 — see property below).
        self.authkey = self.authkey
        self.init_main_module = init_main_module

    @staticmethod
    def _Popen(process_obj):
        # Select the platform-specific Popen implementation.
        if sys.platform == "win32":
            from .popen_loky_win32 import Popen
        else:
            from .popen_loky_posix import Popen
        return Popen(process_obj)

    if sys.version_info < (3, 3):
        def start(self):
            '''
            Start child process
            '''
            from multiprocessing.process import _current_process, _cleanup
            assert self._popen is None, 'cannot start a process twice'
            assert self._parent_pid == os.getpid(), \
                'can only start a process object created by current process'
            _cleanup()
            self._popen = self._Popen(self)
            self._sentinel = self._popen.sentinel
            _current_process._children.add(self)

        @property
        def sentinel(self):
            '''
            Return a file descriptor (Unix) or handle (Windows) suitable for
            waiting for process termination.
            '''
            try:
                return self._sentinel
            except AttributeError:
                raise ValueError("process not started")

    if sys.version_info < (3, 4):
        @property
        def authkey(self):
            return self._authkey

        @authkey.setter
        def authkey(self, authkey):
            '''
            Set authorization key of process
            '''
            self._authkey = AuthenticationKey(authkey)

    def _bootstrap(self):
        # Make sure the child reports the loky start method before
        # running the regular bootstrap.
        from .context import set_start_method
        set_start_method(self._start_method)
        super(LokyProcess, self)._bootstrap()
class LokyInitMainProcess(LokyProcess):
    """LokyProcess variant that re-imports the parent's __main__ module
    in the child (init_main_module=True)."""
    _start_method = 'loky_init_main'

    def __init__(self, group=None, target=None, name=None, args=(),
                 kwargs={}, daemon=None):
        super(LokyInitMainProcess, self).__init__(
            group=group, target=target, name=name, args=args, kwargs=kwargs,
            daemon=daemon, init_main_module=True)
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationKey(bytes):
    """bytes subclass whose pickling is restricted to process spawning.

    Prevents accidental transmission of authentication keys (e.g. over
    a network connection): reducing the object is only allowed while a
    child process is being spawned.
    """
    def __reduce__(self):
        from .context import assert_spawning
        try:
            # Raises RuntimeError when no process is currently spawning.
            assert_spawning(self)
        except RuntimeError:
            raise TypeError(
                'Pickling an AuthenticationKey object is '
                'disallowed for security reasons'
            )
        return AuthenticationKey, (bytes(self),)

View file

@ -0,0 +1,247 @@
###############################################################################
# Queue and SimpleQueue implementation for loky
#
# authors: Thomas Moreau, Olivier Grisel
#
# based on multiprocessing/queues.py (16/02/2017)
# * Add some compatibility function for python2.7 and 3.3 and makes sure
# it uses the right synchronization primitive.
# * Add some custom reducers for the Queues/SimpleQueue to tweak the
# pickling process. (overload Queue._feed/SimpleQueue.put)
#
import os
import sys
import errno
import weakref
import threading
from multiprocessing import util
from multiprocessing import connection
from multiprocessing.synchronize import SEM_VALUE_MAX
from multiprocessing.queues import Full
from multiprocessing.queues import _sentinel, Queue as mp_Queue
from multiprocessing.queues import SimpleQueue as mp_SimpleQueue
from .reduction import loads, dumps
from .context import assert_spawning, get_context
__all__ = ['Queue', 'SimpleQueue', 'Full']
class Queue(mp_Queue):
    """multiprocessing.Queue with customizable reducers.

    The stdlib implementation is extended so a ``reducers`` mapping can
    be carried through pickling and used by the feeder thread when
    serializing items (see ``_feed``).
    """

    def __init__(self, maxsize=0, reducers=None, ctx=None):
        if sys.version_info[:2] >= (3, 4):
            super().__init__(maxsize=maxsize, ctx=ctx)
        else:
            # Inlined copy of the py2.7/3.3 Queue.__init__ so that the
            # synchronization primitives come from the given context.
            if maxsize <= 0:
                # Can raise ImportError (see issues #3770 and #23400)
                maxsize = SEM_VALUE_MAX
            if ctx is None:
                ctx = get_context()
            self._maxsize = maxsize
            self._reader, self._writer = connection.Pipe(duplex=False)
            self._rlock = ctx.Lock()
            self._opid = os.getpid()
            if sys.platform == 'win32':
                # writes to a message-oriented win32 pipe are atomic
                self._wlock = None
            else:
                self._wlock = ctx.Lock()
            self._sem = ctx.BoundedSemaphore(maxsize)
            # For use by concurrent.futures
            self._ignore_epipe = False

            self._after_fork()

            if sys.platform != 'win32':
                util.register_after_fork(self, Queue._after_fork)

        self._reducers = reducers

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._reducers, self._rlock, self._wlock, self._sem,
                self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._reducers, self._rlock, self._wlock, self._sem,
         self._opid) = state
        if sys.version_info >= (3, 9):
            self._reset()
        else:
            self._after_fork()

    # Overload _start_thread to correctly call our custom _feed
    def _start_thread(self):
        util.debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._writer.close, self._reducers,
                  self._ignore_epipe, self._on_queue_feeder_error, self._sem),
            name='QueueFeederThread'
        )
        self._thread.daemon = True

        util.debug('doing self._thread.start()')
        self._thread.start()
        util.debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = util.Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = util.Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )

    # Overload the _feed methods to use our custom pickling strategy.
    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, reducers,
              ignore_epipe, onerror, queue_sem):
        """Feeder-thread loop: pop buffered items, serialize, write to pipe."""
        util.debug('starting thread to feed data to pipe')
        # Bind hot attribute lookups to locals for the loop below.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while 1:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            util.debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj_ = dumps(obj, reducers=reducers)
                        if wacquire is None:
                            send_bytes(obj_)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj_)
                            finally:
                                wrelease()
                        # Remove references early to avoid leaking memory
                        del obj, obj_
                except IndexError:
                    # buffer emptied by bpopleft: go back to waiting
                    pass
            except BaseException as e:
                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may be become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to cleanup.
                if util.is_exiting():
                    util.info('error in queue thread: %s', e)
                    return
                else:
                    # Release the slot taken by the failed item before
                    # reporting the error.
                    queue_sem.release()
                    onerror(e, obj)

    def _on_queue_feeder_error(self, e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.
        """
        import traceback
        traceback.print_exc()

    if sys.version_info[:2] < (3, 4):
        # Compat for python2.7/3.3 that use _send instead of _send_bytes
        def _after_fork(self):
            super(Queue, self)._after_fork()
            self._send_bytes = self._writer.send_bytes
class SimpleQueue(mp_SimpleQueue):
    """multiprocessing.SimpleQueue with customizable reducers for ``put``."""

    def __init__(self, reducers=None, ctx=None):
        if sys.version_info[:2] >= (3, 4):
            super().__init__(ctx=ctx)
        else:
            # Use the context to create the sync objects for python2.7/3.3
            if ctx is None:
                ctx = get_context()
            self._reader, self._writer = connection.Pipe(duplex=False)
            self._rlock = ctx.Lock()
            self._poll = self._reader.poll
            if sys.platform == 'win32':
                self._wlock = None
            else:
                self._wlock = ctx.Lock()

        # Add possibility to use custom reducers
        self._reducers = reducers

    def close(self):
        # Close both pipe ends held by this process.
        self._reader.close()
        self._writer.close()

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (self._reader, self._writer, self._reducers, self._rlock,
                self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._reducers, self._rlock,
         self._wlock) = state

    if sys.version_info[:2] < (3, 4):
        # For python2.7/3.3, overload get to avoid creating deadlocks with
        # unpickling errors.
        def get(self):
            with self._rlock:
                res = self._reader.recv_bytes()
            # unserialize the data after having released the lock
            return loads(res)

    # Overload put to use our customizable reducer
    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = dumps(obj, reducers=self._reducers)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)

View file

@ -0,0 +1,280 @@
###############################################################################
# Customizable Pickler with some basic reducers
#
# author: Thomas Moreau
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Replace the ForkingPickler with a similar _LokyPickler,
# * Add CustomizableLokyPickler to allow customizing pickling process
# on the fly.
#
import io
import os
import sys
import functools
from multiprocessing import util
import types
try:
# Python 2 compat
from cPickle import loads as pickle_loads
except ImportError:
from pickle import loads as pickle_loads
import copyreg
from pickle import HIGHEST_PROTOCOL
if sys.platform == "win32":
if sys.version_info[:2] > (3, 3):
from multiprocessing.reduction import duplicate
else:
from multiprocessing.forking import duplicate
###############################################################################
# Enable custom pickling in Loky.
# To allow instance customization of the pickling process, we use 2 classes.
# _ReducerRegistry gives module level customization and CustomizablePickler
# permits to use instance base custom reducers. Only CustomizablePickler
# should be used.
class _ReducerRegistry(object):
    """Registry for custom reducers.

    HIGHEST_PROTOCOL is selected by default as this pickler is used
    to pickle ephemeral datastructures for interprocess communication
    hence no backward compatibility is required.

    Holds module-level reducers that every CustomizablePickler instance
    (see set_loky_pickler) copies into its own dispatch table.
    """

    # We override the pure Python pickler as its the only way to be able to
    # customize the dispatch table without side effects in Python 2.6
    # to 3.2. For Python 3.3+ leverage the new dispatch_table
    # feature from http://bugs.python.org/issue14166 that makes it possible
    # to use the C implementation of the Pickler which is faster.

    dispatch_table = {}

    @classmethod
    def register(cls, type, reduce_func):
        """Attach a reducer function to a given type in the dispatch table."""
        if sys.version_info < (3,):
            # Python 2 pickler dispatching is not explicitly customizable.
            # Let us use a closure to workaround this limitation.
            def dispatcher(cls, obj):
                reduced = reduce_func(obj)
                cls.save_reduce(obj=obj, *reduced)
            cls.dispatch_table[type] = dispatcher
        else:
            cls.dispatch_table[type] = reduce_func
###############################################################################
# Registers extra pickling routines to improve picklization for loky
register = _ReducerRegistry.register
# make methods picklable
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
    # Throwaway class used only to obtain the concrete runtime types of
    # bound instance methods (type(_C().f)) and bound classmethods
    # (type(_C.h)), so reducers can be registered for them below.
    def f(self):
        pass

    @classmethod
    def h(cls):
        pass
register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)
if not hasattr(sys, "pypy_version_info"):
# PyPy uses functions instead of method_descriptors and wrapper_descriptors
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
# Make partial func pickable
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
if sys.platform != "win32":
from ._posix_reduction import _mk_inheritable # noqa: F401
else:
from . import _win_reduction # noqa: F401
# global variable to change the pickler behavior
try:
from joblib.externals import cloudpickle # noqa: F401
DEFAULT_ENV = "cloudpickle"
except ImportError:
# If cloudpickle is not present, fallback to pickle
DEFAULT_ENV = "pickle"
ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
_LokyPickler = None
_loky_pickler_name = None
def set_loky_pickler(loky_pickler=None):
    """Select the Pickler class used by loky for IPC serialization.

    *loky_pickler* is a module name exposing a ``Pickler`` attribute, or
    "cloudpickle" (the default, also configurable through the
    LOKY_PICKLER environment variable).  The chosen class is wrapped in
    a CustomizablePickler supporting per-instance reducers, and stored
    in the module globals ``_LokyPickler`` / ``_loky_pickler_name``.
    """
    global _LokyPickler, _loky_pickler_name
    if loky_pickler is None:
        loky_pickler = ENV_LOKY_PICKLER

    loky_pickler_cls = None

    # The default loky_pickler is cloudpickle
    if loky_pickler in ["", None]:
        loky_pickler = "cloudpickle"

    # No-op if the requested pickler is already installed.
    if loky_pickler == _loky_pickler_name:
        return

    if loky_pickler == "cloudpickle":
        from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
    else:
        try:
            from importlib import import_module
            module_pickle = import_module(loky_pickler)
            loky_pickler_cls = module_pickle.Pickler
        except (ImportError, AttributeError) as e:
            # Augment the error with the origin of the bad setting.
            extra_info = ("\nThis error occurred while setting loky_pickler to"
                          " '{}', as required by the env variable LOKY_PICKLER"
                          " or the function set_loky_pickler."
                          .format(loky_pickler))
            e.args = (e.args[0] + extra_info,) + e.args[1:]
            e.msg = e.args[0]
            raise e

    util.debug("Using '{}' for serialization."
               .format(loky_pickler if loky_pickler else "cloudpickle"))

    class CustomizablePickler(loky_pickler_cls):
        _loky_pickler_cls = loky_pickler_cls

        def _set_dispatch_table(self, dispatch_table):
            # Install *dispatch_table* in a way the C pickler honors.
            for ancestor_class in self._loky_pickler_cls.mro():
                dt_attribute = getattr(ancestor_class, "dispatch_table", None)
                if isinstance(dt_attribute, types.MemberDescriptorType):
                    # Ancestor class (typically _pickle.Pickler) has a
                    # member_descriptor for its "dispatch_table" attribute. Use
                    # it to set the dispatch_table as a member instead of a
                    # dynamic attribute in the __dict__ of the instance,
                    # otherwise it will not be taken into account by the C
                    # implementation of the dump method if a subclass defines a
                    # class-level dispatch_table attribute as was done in
                    # cloudpickle 1.6.0:
                    # https://github.com/joblib/loky/pull/260
                    dt_attribute.__set__(self, dispatch_table)
                    break

            # On top of member descriptor set, also use setattr such that code
            # that directly access self.dispatch_table gets a consistent view
            # of the same table.
            self.dispatch_table = dispatch_table

        def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
            loky_pickler_cls.__init__(self, writer, protocol=protocol)
            if reducers is None:
                reducers = {}
            if sys.version_info < (3,):
                self.dispatch = loky_pickler_cls.dispatch.copy()
                self.dispatch.update(_ReducerRegistry.dispatch_table)
            else:
                if hasattr(self, "dispatch_table"):
                    # Force a copy that we will update without mutating the
                    # any class level defined dispatch_table.
                    loky_dt = dict(self.dispatch_table)
                else:
                    # Use standard reducers as bases
                    loky_dt = copyreg.dispatch_table.copy()

                # Register loky specific reducers
                loky_dt.update(_ReducerRegistry.dispatch_table)

                # Set the new dispatch table, taking care of the fact that we
                # need to use the member_descriptor when we inherit from a
                # subclass of the C implementation of the Pickler base class
                # with an class level dispatch_table attribute.
                self._set_dispatch_table(loky_dt)

            # Register custom reducers
            for type, reduce_func in reducers.items():
                self.register(type, reduce_func)

        def register(self, type, reduce_func):
            """Attach a reducer function to a given type in the dispatch table.
            """
            if sys.version_info < (3,):
                # Python 2 pickler dispatching is not explicitly customizable.
                # Let us use a closure to workaround this limitation.
                def dispatcher(self, obj):
                    reduced = reduce_func(obj)
                    self.save_reduce(obj=obj, *reduced)
                self.dispatch[type] = dispatcher
            else:
                self.dispatch_table[type] = reduce_func

    _LokyPickler = CustomizablePickler
    _loky_pickler_name = loky_pickler
def get_loky_pickler_name():
    """Return the name of the pickler currently selected by loky."""
    # Read-only access: no ``global`` declaration needed.
    return _loky_pickler_name
def get_loky_pickler():
    """Return the Pickler class currently selected by loky."""
    # Read-only access: no ``global`` declaration needed.
    return _LokyPickler
# Set it to its default value
set_loky_pickler()
def loads(buf):
    """Deserialize an object from *buf* (bytes, or BytesIO on py2.7)."""
    if sys.version_info < (3, 3):
        # Compat for python2.7 version: pickle.loads cannot consume a
        # BytesIO directly, so extract the raw bytes first.
        if isinstance(buf, io.BytesIO):
            buf = buf.getvalue()
    return pickle_loads(buf)
def dump(obj, file, reducers=None, protocol=None):
    '''Replacement for pickle.dump() using _LokyPickler.

    *reducers* is an optional {type: reduce_func} mapping merged into
    the pickler's dispatch table for this call only.
    '''
    global _LokyPickler
    _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
def dumps(obj, reducers=None, protocol=None):
    """Serialize *obj* with the current loky pickler and return the bytes.

    Returns a memoryview-backed buffer on Python 3.3+, plain bytes on
    older interpreters.
    """
    sink = io.BytesIO()
    dump(obj, sink, reducers=reducers, protocol=protocol)
    if sys.version_info >= (3, 3):
        return sink.getbuffer()
    return sink.getvalue()
__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
if sys.platform == "win32":
__all__ += ["duplicate"]

View file

@ -0,0 +1,380 @@
###############################################################################
# Server process to keep track of unlinked resources, like folders and
# semaphores and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
# * include custom spawnv_passfds to start the process
# * use custom unlink from our own SemLock implementation
# * add some VERBOSE logging
#
#
# On Unix we run a server process which keeps track of unlinked
# resources. The server ignores SIGINT and SIGTERM and reads from a
# pipe. The resource_tracker implements a reference counting scheme: each time
# a Python process anticipates the shared usage of a resource by another
# process, it signals the resource_tracker of this shared usage, and in return,
# the resource_tracker increments the resource's reference count by 1.
# Similarly, when access to a resource is closed by a Python process, the
# process notifies the resource_tracker by asking it to decrement the
# resource's reference count by 1. When the reference count drops to 0, the
# resource_tracker attempts to clean up the underlying resource.
# Finally, every other process connected to the resource tracker has a copy of
# the writable end of the pipe used to communicate with it, so the resource
# tracker gets EOF when all other processes have exited. Then the
# resource_tracker process unlinks any remaining leaked resources (with
# reference count above 0)
# For semaphores, this is important because the system only supports a limited
# number of named semaphores, and they will not be automatically removed till
# the next reboot. Without this resource tracker process, "killall python"
# would probably leave unlinked semaphores.
# Note that this behavior differs from CPython's resource_tracker, which only
# implements list of shared resources, and not a proper refcounting scheme.
# Also, CPython's resource tracker will only attempt to cleanup those shared
# resources once all procsses connected to the resouce tracker have exited.
import os
import shutil
import sys
import signal
import warnings
import threading
from . import spawn
from multiprocessing import util
if sys.platform == "win32":
from .compat_win32 import _winapi
from .reduction import duplicate
import msvcrt
try:
from _multiprocessing import sem_unlink
except ImportError:
from .semlock import sem_unlink
if sys.version_info < (3,):
BrokenPipeError = OSError
from os import fdopen as open
__all__ = ['ensure_running', 'register', 'unregister']
_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
_CLEANUP_FUNCS = {
'folder': shutil.rmtree,
'file': os.unlink
}
if os.name == "posix":
_CLEANUP_FUNCS['semlock'] = sem_unlink
VERBOSE = False
class ResourceTracker(object):
    """Client-side handle on the resource tracker server process.

    Lazily spawns the tracker (``ensure_running``) and talks to it over
    a pipe with one-line ASCII commands (REGISTER / UNREGISTER /
    MAYBE_UNLINK / PROBE).
    """

    def __init__(self):
        self._lock = threading.Lock()
        # write end of the pipe to the tracker, None until started
        self._fd = None
        self._pid = None

    def getfd(self):
        # Return the tracker pipe fd, starting the tracker if needed.
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        '''Make sure that resource tracker process is running.

        This can be run from any process.  Usually a child process will use
        the resource created by its parent.'''
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)
                if os.name == "posix":
                    try:
                        # At this point, the resource_tracker process has been
                        # killed or crashed. Let's remove the process entry
                        # from the process table to avoid zombie processes.
                        os.waitpid(self._pid, 0)
                    except OSError:
                        # The process was terminated or is a child from an
                        # ancestor of the current process.
                        pass
                self._fd = None
                self._pid = None

                warnings.warn('resource_tracker: process died unexpectedly, '
                              'relaunching.  Some folders/sempahores might '
                              'leak.')

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass

            r, w = os.pipe()
            if sys.platform == "win32":
                # On Windows, pass an inheritable handle number instead
                # of the CRT fd.
                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
                os.close(r)
                r = _r

            cmd = 'from {} import main; main({}, {})'.format(
                main.__module__, r, VERBOSE)
            try:
                fds_to_pass.append(r)
                # process will out live us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                # In python 3.3, there is a bug which put `-RRRRR..` instead of
                # `-R` in args. Replace it to get the correct flags.
                # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488
                if sys.version_info[:2] <= (3, 3):
                    import re
                    for i in range(1, len(args)):
                        args[i] = re.sub("-R+", "-R", args[i])
                args += ['-c', cmd]
                util.debug("launching resource tracker: {}".format(args))
                # bpo-33613: Register a signal mask that will block the
                # signals.  This signal mask will be inherited by the child
                # that is going to be spawned and will protect the child from a
                # race condition that can make the child die before it
                # registers signal handlers for SIGINT and SIGTERM. The mask is
                # unregistered after spawning the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_BLOCK,
                                               _IGNORED_SIGNALS)
                    pid = spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_UNBLOCK,
                                               _IGNORED_SIGNALS)
            except BaseException:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                # The child owns its copy of the read end now.
                if sys.platform == "win32":
                    _winapi.CloseHandle(r)
                else:
                    os.close(r)

    def _check_alive(self):
        '''Check for the existence of the resource tracker process.'''
        try:
            # A failed write means the tracker's read end is gone.
            self._send('PROBE', '', '')
        except BrokenPipeError:
            return False
        else:
            return True

    def register(self, name, rtype):
        '''Register a named resource, and increment its refcount.'''
        self.ensure_running()
        self._send('REGISTER', name, rtype)

    def unregister(self, name, rtype):
        '''Unregister a named resource with resource tracker.'''
        self.ensure_running()
        self._send('UNREGISTER', name, rtype)

    def maybe_unlink(self, name, rtype):
        '''Decrement the refcount of a resource, and delete it if it hits 0'''
        self.ensure_running()
        self._send("MAYBE_UNLINK", name, rtype)

    def _send(self, cmd, name, rtype):
        """Write one 'CMD:name:rtype\\n' line to the tracker pipe."""
        msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('name too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)
# Process-wide singleton tracker; the aliases below expose its bound
# methods as a simple module-level functional API.
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd
def main(fd, verbose=0):
    '''Run the resource tracker event loop.

    Reads newline-terminated ``CMD:NAME:RTYPE`` commands (REGISTER,
    UNREGISTER, MAYBE_UNLINK, PROBE) from the pipe file descriptor *fd*,
    maintains a per-type refcount registry, and deletes any leftover
    resources when the pipe closes.
    '''
    # protect the process from ^C and "killall python" etc
    if verbose:
        util.log_to_stderr(level=util.DEBUG)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    if _HAVE_SIGMASK:
        # The parent blocked these signals around the spawn; unblock them
        # now that the IGN handlers are installed.
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass
    if verbose:
        util.debug("Main resource tracker is running")
    # registry[rtype][name] -> current refcount of that tracked resource
    registry = {rtype: dict() for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        if sys.platform == "win32":
            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
        with open(fd, 'rb') as f:
            while True:
                line = f.readline()
                if line == b'':  # EOF
                    break
                try:
                    splitted = line.strip().decode('ascii').split(':')
                    # name can potentially contain separator symbols (for
                    # instance folders on Windows)
                    cmd, name, rtype = (
                        splitted[0], ':'.join(splitted[1:-1]), splitted[-1])
                    if cmd == 'PROBE':
                        # liveness check only -- no registry change
                        continue
                    if rtype not in _CLEANUP_FUNCS:
                        raise ValueError(
                            'Cannot register {} for automatic cleanup: '
                            'unknown resource type ({}). Resource type should '
                            'be one of the following: {}'.format(
                                name, rtype, list(_CLEANUP_FUNCS.keys())))
                    if cmd == 'REGISTER':
                        if name not in registry[rtype]:
                            registry[rtype][name] = 1
                        else:
                            registry[rtype][name] += 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] incremented refcount of {} "
                                "{} (current {})".format(
                                    rtype, name, registry[rtype][name]))
                    elif cmd == 'UNREGISTER':
                        del registry[rtype][name]
                        if verbose:
                            util.debug(
                                "[ResourceTracker] unregister {} {}: "
                                "registry({})".format(name, rtype, len(registry)))
                    elif cmd == 'MAYBE_UNLINK':
                        registry[rtype][name] -= 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] decremented refcount of {} "
                                "{} (current {})".format(
                                    rtype, name, registry[rtype][name]))
                        if registry[rtype][name] == 0:
                            # last reference dropped: delete the resource now
                            del registry[rtype][name]
                            try:
                                if verbose:
                                    util.debug(
                                        "[ResourceTracker] unlink {}"
                                        .format(name))
                                _CLEANUP_FUNCS[rtype](name)
                            except Exception as e:
                                warnings.warn(
                                    'resource_tracker: %s: %r' % (name, e))
                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except BaseException:
                    # Never let a malformed command kill the tracker loop.
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining resources
        def _unlink_resources(rtype_registry, rtype):
            # Delete every leftover resource of one type, warning first.
            if rtype_registry:
                try:
                    warnings.warn('resource_tracker: There appear to be %d '
                                  'leaked %s objects to clean up at shutdown' %
                                  (len(rtype_registry), rtype))
                except Exception:
                    pass
            for name in rtype_registry:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died. We therefore clean it up.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                    if verbose:
                        util.debug("[ResourceTracker] unlink {}"
                                   .format(name))
                except Exception as e:
                    warnings.warn('resource_tracker: %s: %r' % (name, e))
        for rtype, rtype_registry in registry.items():
            if rtype == "folder":
                continue
            else:
                _unlink_resources(rtype_registry, rtype)
        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker). To limit the risk of the resource tracker
        # attempting to delete twice a resource (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
        if "folder" in registry:
            _unlink_resources(registry["folder"], "folder")
    if verbose:
        util.debug("resource tracker shut down")
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
    """Start *path* with *args*, keeping only *passfds* open in the child.

    Returns the pid of the spawned process.  On win32, a failure of
    CreateProcess now propagates to the caller instead of being silently
    swallowed (the old ``except BaseException: pass`` left ``pid``
    undefined and turned the real error into a NameError).
    """
    passfds = sorted(passfds)
    if sys.platform != "win32":
        errpipe_read, errpipe_write = os.pipe()
        try:
            from .reduction import _mk_inheritable
            # Mark every fd inheritable before handing them to fork_exec.
            _pass = [_mk_inheritable(fd) for fd in passfds]
            from .fork_exec import fork_exec
            return fork_exec(args, _pass)
        finally:
            os.close(errpipe_read)
            os.close(errpipe_write)
    else:
        cmd = ' '.join('"%s"' % x for x in args)
        hp, ht, pid, tid = _winapi.CreateProcess(
            path, cmd, None, None, True, 0, None, None, None)
        # The thread handle is not needed; close it immediately.
        _winapi.CloseHandle(ht)
        return pid

View file

@ -0,0 +1,274 @@
###############################################################################
# Ctypes implementation for posix semaphore.
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from cpython/Modules/_multiprocessing/semaphore.c (17/02/2017)
# * use ctypes to access pthread semaphores and provide a full python
# semaphore management.
# * For OSX, as no sem_getvalue is not implemented, Semaphore with value > 1
# are not guaranteed to work.
# * Only work with LokyProcess on posix
#
import os
import sys
import time
import errno
import ctypes
import tempfile
import threading
from ctypes.util import find_library
# As we need to use ctypes return types for semlock object, failure value
# needs to be cast to proper python value. Unix failure convention is to
# return 0, whereas OSX returns -1
SEM_FAILURE = ctypes.c_void_p(0).value
if sys.platform == 'darwin':
    SEM_FAILURE = ctypes.c_void_p(-1).value
# Semaphore types
RECURSIVE_MUTEX = 0
SEMAPHORE = 1
# Semaphore constants: create-exclusive open flags, and 384 == 0o600
# (owner read/write) permissions for the named semaphore.
SEM_OFLAG = ctypes.c_int(os.O_CREAT | os.O_EXCL)
SEM_PERM = ctypes.c_int(384)
class timespec(ctypes.Structure):
    """ctypes mirror of the C ``struct timespec`` (seconds + nanoseconds),
    passed to ``sem_timedwait``."""
    _fields_ = [("tv_sec", ctypes.c_long), ("tv_nsec", ctypes.c_long)]
# Load the pthread/libc semaphore API and declare argument/return types so
# ctypes does not truncate pointers on 64-bit platforms.
if sys.platform != 'win32':
    pthread = ctypes.CDLL(find_library('pthread'), use_errno=True)
    pthread.sem_open.restype = ctypes.c_void_p
    pthread.sem_close.argtypes = [ctypes.c_void_p]
    pthread.sem_wait.argtypes = [ctypes.c_void_p]
    pthread.sem_trywait.argtypes = [ctypes.c_void_p]
    pthread.sem_post.argtypes = [ctypes.c_void_p]
    pthread.sem_getvalue.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
    pthread.sem_unlink.argtypes = [ctypes.c_char_p]
    # sem_timedwait does not exist on OSX; see _sem_timedwait below.
    if sys.platform != "darwin":
        pthread.sem_timedwait.argtypes = [ctypes.c_void_p,
                                          ctypes.POINTER(timespec)]
# threading.get_ident is only available on Python >= 3.3.
try:
    from threading import get_ident
except ImportError:
    def get_ident():
        # Fallback for Python 2: the thread ident attribute.
        return threading.current_thread().ident
# Python < 3.3 lacks these OSError subclasses; approximate them.
if sys.version_info[:2] < (3, 3):
    class FileExistsError(OSError):
        pass
    class FileNotFoundError(OSError):
        pass
def sem_unlink(name):
    """Unlink (delete) the POSIX named semaphore called *name*."""
    rc = pthread.sem_unlink(name.encode('ascii'))
    if rc < 0:
        raiseFromErrno()
def _sem_open(name, value=None):
    """ Construct or retrieve a semaphore with the given name

    If value is None, try to retrieve an existing named semaphore.
    Else create a new semaphore with the given value
    """
    if value is None:
        # Open an already-existing semaphore (no O_CREAT).
        handle = pthread.sem_open(ctypes.c_char_p(name), 0)
    else:
        handle = pthread.sem_open(ctypes.c_char_p(name), SEM_OFLAG, SEM_PERM,
                                  ctypes.c_int(value))
    if handle != SEM_FAILURE:
        return handle
    # Map the C errno to the matching Python exception.
    err = ctypes.get_errno()
    if err == errno.EEXIST:
        raise FileExistsError("a semaphore named %s already exists" % name)
    if err == errno.ENOENT:
        raise FileNotFoundError('cannot find semaphore named %s' % name)
    if err == errno.ENOSYS:
        raise NotImplementedError('No semaphore implementation on this '
                                  'system')
    raiseFromErrno()
def _sem_timedwait(handle, timeout):
    # Wait on the semaphore for at most *timeout* seconds.  Returns 0 on
    # success and -1 (with errno set, e.g. ETIMEDOUT) on failure, mirroring
    # the C sem_timedwait convention.
    t_start = time.time()
    if sys.platform != "darwin":
        # Build the absolute deadline as a timespec: split both the timeout
        # and the current time into (sec, nsec) and renormalize nanoseconds.
        sec = int(timeout)
        tv_sec = int(t_start)
        nsec = int(1e9 * (timeout - sec) + .5)
        tv_nsec = int(1e9 * (t_start - tv_sec) + .5)
        deadline = timespec(sec+tv_sec, nsec+tv_nsec)
        deadline.tv_sec += int(deadline.tv_nsec / 1000000000)
        deadline.tv_nsec %= 1000000000
        return pthread.sem_timedwait(handle, ctypes.pointer(deadline))
    # PERFORMANCE WARNING
    # No sem_timedwait on OSX so we implement our own method. This method can
    # degrade performances has the wait can have a latency up to 20 msecs
    deadline = t_start + timeout
    delay = 0
    now = time.time()
    while True:
        # Poll the sem file
        res = pthread.sem_trywait(handle)
        if res == 0:
            return 0
        else:
            e = ctypes.get_errno()
            if e != errno.EAGAIN:
                raiseFromErrno()
        # check for timeout
        now = time.time()
        if now > deadline:
            ctypes.set_errno(errno.ETIMEDOUT)
            return -1
        # calculate how much time left and check the delay is not too long
        # -- maximum is 20 msecs
        difference = (deadline - now)
        delay = min(delay, 20e-3, difference)
        # Sleep and increase delay
        time.sleep(delay)
        delay += 1e-3
class SemLock(object):
    """ctypes wrapper to the unix semaphore"""
    _rand = tempfile._RandomNameSequence()
    def __init__(self, kind, value, maxvalue, name=None, unlink_now=False):
        # count/ident implement recursive-mutex ownership bookkeeping on
        # top of the plain POSIX semaphore.
        self.count = 0
        self.ident = 0
        self.kind = kind
        self.maxvalue = maxvalue
        self.name = name
        self.handle = _sem_open(self.name.encode('ascii'), value)
    def __del__(self):
        try:
            res = pthread.sem_close(self.handle)
            assert res == 0, "Issue while closing semaphores"
        except AttributeError:
            # Interpreter shutdown may have torn down module globals.
            pass
    def _is_mine(self):
        # True when the calling thread currently owns this lock.
        return self.count > 0 and get_ident() == self.ident
    def acquire(self, block=True, timeout=None):
        """Acquire once; returns True on success, False on timeout/busy,
        None when interrupted by a signal (EINTR)."""
        if self.kind == RECURSIVE_MUTEX and self._is_mine():
            self.count += 1
            return True
        if block and timeout is None:
            res = pthread.sem_wait(self.handle)
        elif not block or timeout <= 0:
            res = pthread.sem_trywait(self.handle)
        else:
            res = _sem_timedwait(self.handle, timeout)
        if res < 0:
            e = ctypes.get_errno()
            if e == errno.EINTR:
                return None
            elif e in [errno.EAGAIN, errno.ETIMEDOUT]:
                return False
            raiseFromErrno()
        self.count += 1
        self.ident = get_ident()
        return True
    def release(self):
        """Release once; enforces ownership for recursive mutexes."""
        if self.kind == RECURSIVE_MUTEX:
            assert self._is_mine(), (
                "attempt to release recursive lock not owned by thread")
            if self.count > 1:
                self.count -= 1
                return
            assert self.count == 1
        else:
            if sys.platform == 'darwin':
                # Handle broken get_value for mac ==> only Lock will work
                # as sem_get_value do not work properly
                if self.maxvalue == 1:
                    # Probe with trywait: EAGAIN means the lock is held (value
                    # 0) and a post releases it; a successful trywait means it
                    # was not held, so re-posting would over-release.
                    if pthread.sem_trywait(self.handle) < 0:
                        e = ctypes.get_errno()
                        if e != errno.EAGAIN:
                            raise OSError(e, errno.errorcode[e])
                        else:
                            if pthread.sem_post(self.handle) < 0:
                                raiseFromErrno()
                    else:
                        raise ValueError(
                            "semaphore or lock released too many times")
                else:
                    import warnings
                    warnings.warn("semaphore are broken on OSX, release might "
                                  "increase its maximal value", RuntimeWarning)
            else:
                value = self._get_value()
                if value >= self.maxvalue:
                    raise ValueError(
                        "semaphore or lock released too many times")
                if pthread.sem_post(self.handle) < 0:
                    raiseFromErrno()
        self.count -= 1
    def _get_value(self):
        # sem_getvalue writes the current value through an out-pointer.
        value = ctypes.pointer(ctypes.c_int(-1))
        if pthread.sem_getvalue(self.handle, value) < 0:
            raiseFromErrno()
        return value.contents.value
    def _count(self):
        # Number of nested acquisitions by the current owner.
        return self.count
    def _is_zero(self):
        if sys.platform == 'darwin':
            # Handle broken get_value for mac ==> only Lock will work
            # as sem_get_value do not work properly
            if pthread.sem_trywait(self.handle) < 0:
                e = ctypes.get_errno()
                if e == errno.EAGAIN:
                    return True
                raise OSError(e, errno.errorcode[e])
            else:
                # trywait decremented the value; restore it with a post.
                if pthread.sem_post(self.handle) < 0:
                    raiseFromErrno()
                return False
        else:
            value = ctypes.pointer(ctypes.c_int(-1))
            if pthread.sem_getvalue(self.handle, value) < 0:
                raiseFromErrno()
            return value.contents.value == 0
    def _after_fork(self):
        # A forked child starts with no ownership of the lock.
        self.count = 0
    @staticmethod
    def _rebuild(handle, kind, maxvalue, name):
        # Recreate a SemLock from pickled state in a child process.  Note
        # the *handle* argument is not reused: the semaphore is reopened
        # by name.
        self = SemLock.__new__(SemLock)
        self.count = 0
        self.ident = 0
        self.kind = kind
        self.maxvalue = maxvalue
        self.name = name
        self.handle = _sem_open(name.encode('ascii'))
        return self
def raiseFromErrno():
    """Raise an OSError built from the current ctypes-level errno."""
    err = ctypes.get_errno()
    raise OSError(err, errno.errorcode[err])

View file

@ -0,0 +1,258 @@
###############################################################################
# Prepares and processes the data to setup the new process environment
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/spawn.py (17/02/2017)
# * Improve logging data
#
import os
import sys
import runpy
import types
from multiprocessing import process, util
# Platform flags: frozen-executable and pythonservice detection only matter
# on Windows; both are False elsewhere.
if sys.platform != 'win32':
    WINEXE = False
    WINSERVICE = False
else:
    import msvcrt
    from .reduction import duplicate
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
# Interpreter used to spawn children; pythonservice.exe cannot be re-executed
# directly, so fall back to python.exe next to the service binary.
if WINSERVICE:
    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
    _python_exe = sys.executable
def get_executable():
    '''Return the Python interpreter path used to spawn child processes.'''
    return _python_exe
def _check_not_importing_main():
    # Guard against fork-bombs: while a child process is bootstrapping
    # (its ``_inheriting`` flag is True), importing the main module must
    # not spawn new processes.
    if getattr(process.current_process(), '_inheriting', False):
        raise RuntimeError('''
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.
        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:
            if __name__ == '__main__':
                freeze_support()
                ...
        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.''')
def get_preparation_data(name, init_main_module=True):
    '''
    Return info about parent needed by child to unpickle process object

    The returned dict is consumed by :func:`prepare` in the child and
    carries the process name, logging setup, sys.path/sys.argv, resource
    tracker connection details and how to reconstruct ``__main__``.
    '''
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=bytes(process.current_process().authkey),
        name=name,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd()
    )
    # Send sys_path and make sure the current directory will not be changed
    sys_path = [p for p in sys.path]
    try:
        i = sys_path.index('')
    except ValueError:
        pass
    else:
        # '' means "current directory": make it explicit so the child does
        # not resolve it relative to its own cwd.
        sys_path[i] = process.ORIGINAL_DIR
    d['sys_path'] = sys_path
    # Make sure to pass the information if the multiprocessing logger is active
    if util._logger is not None:
        d['log_level'] = util._logger.getEffectiveLevel()
        if len(util._logger.handlers) > 0:
            h = util._logger.handlers[0]
            d['log_fmt'] = h.formatter._fmt
    # Tell the child how to communicate with the resource_tracker
    from .resource_tracker import _resource_tracker
    _resource_tracker.ensure_running()
    d["tracker_args"] = {"pid": _resource_tracker._pid}
    if sys.platform == "win32":
        # File descriptors do not cross process boundaries on Windows;
        # duplicate the OS handle instead and let the child reopen it.
        child_w = duplicate(
            msvcrt.get_osfhandle(_resource_tracker._fd), inheritable=True)
        d["tracker_args"]["fh"] = child_w
    else:
        d["tracker_args"]["fd"] = _resource_tracker._fd
    if sys.version_info >= (3, 8) and os.name == 'posix':
        # joblib/loky#242: allow loky processes to retrieve the resource
        # tracker of their parent in case the child processes depickles
        # shared_memory objects, that are still tracked by multiprocessing's
        # resource_tracker by default.
        # XXX: this is a workaround that may be error prone: in the future, it
        # would be better to have loky subclass multiprocessing's shared_memory
        # to force registration of shared_memory segments via loky's
        # resource_tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker
        )
        # multiprocessing's resource_tracker must be running before loky
        # process is created (otherwise the child won't be able to use it if it
        # is created later on)
        mp_resource_tracker.ensure_running()
        d["mp_tracker_args"] = {
            'fd': mp_resource_tracker._fd, 'pid': mp_resource_tracker._pid
        }
    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    if init_main_module:
        main_module = sys.modules['__main__']
        try:
            main_mod_name = getattr(main_module.__spec__, "name", None)
        except BaseException:
            main_mod_name = None
        if main_mod_name is not None:
            d['init_main_from_name'] = main_mod_name
        elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
            main_path = getattr(main_module, '__file__', None)
            if main_path is not None:
                if (not os.path.isabs(main_path) and
                        process.ORIGINAL_DIR is not None):
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d['init_main_from_path'] = os.path.normpath(main_path)
                # Compat for python2.7
                d['main_path'] = d['init_main_from_path']
    return d
#
# Prepare current process
#
# Keep strong references to the __main__ modules we replace, so objects
# pickled against them stay importable for the lifetime of the process.
old_main_modules = []
def prepare(data):
    '''
    Try to get current process ready to unpickle process object

    *data* is the dict produced by get_preparation_data in the parent;
    every key is optional and applied independently.
    '''
    if 'name' in data:
        process.current_process().name = data['name']
    if 'authkey' in data:
        process.current_process().authkey = data['authkey']
    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()
    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])
    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )
    if 'sys_path' in data:
        sys.path = data['sys_path']
    if 'sys_argv' in data:
        sys.argv = data['sys_argv']
    if 'dir' in data:
        os.chdir(data['dir'])
    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']
    if 'mp_tracker_args' in data:
        # Reconnect to the parent's multiprocessing resource tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker
        )
        mp_resource_tracker._fd = data['mp_tracker_args']['fd']
        mp_resource_tracker._pid = data['mp_tracker_args']['pid']
    if 'tracker_args' in data:
        # Reconnect to the parent's loky resource tracker; on win32 the
        # inherited OS handle must be converted back to a C file descriptor.
        from .resource_tracker import _resource_tracker
        _resource_tracker._pid = data["tracker_args"]['pid']
        if sys.platform == 'win32':
            handle = data["tracker_args"]["fh"]
            _resource_tracker._fd = msvcrt.open_osfhandle(handle, 0)
        else:
            _resource_tracker._fd = data["tracker_args"]["fd"]
    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
    '''Re-run module *mod_name* as ``__mp_main__`` and alias it to
    ``__main__`` in this (child) process.'''
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules['__main__']
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return
    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return
    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly. We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(mod_name,
                                    run_name="__mp_main__",
                                    alter_sys=True)
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def _fixup_main_from_path(main_path):
    '''Execute the script at *main_path* as ``__mp_main__`` and alias it
    to ``__main__`` in this (child) process.'''
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules['__main__']
    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == 'ipython':
        return
    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, '__file__', None) == main_path:
        return
    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path,
                                  run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def import_main_path(main_path):
    '''
    Set sys.modules['__main__'] to module at main_path
    '''
    # Thin public wrapper around the path-based __main__ fixup.
    _fixup_main_from_path(main_path)

View file

@ -0,0 +1,381 @@
###############################################################################
# Synchronization primitives based on our SemLock implementation
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/synchronize.py (17/02/2017)
# * Remove ctx argument for compatibility reason
# * Implementation of Condition/Event are necessary for compatibility
# with python2.7/3.3, Barrier should be reimplemented to for those
# version (but it is not used in loky).
#
import os
import sys
import tempfile
import threading
import _multiprocessing
from time import time as _time
from .context import assert_spawning
from . import resource_tracker
from multiprocessing import process
from multiprocessing import util
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
if sys.version_info < (3, 4):
from .semlock import SemLock as _SemLock
from .semlock import sem_unlink
else:
from _multiprocessing import SemLock as _SemLock
from _multiprocessing import sem_unlink
except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
" synchronization primitives needed will not" +
" function, see issue 3770.")
# Python < 3.3 has no FileExistsError; OSError is the closest superset.
if sys.version_info[:2] < (3, 3):
    FileExistsError = OSError
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
# Largest value a semaphore may hold, as reported by the platform.
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
    '''Base wrapper adding naming, pickling and cleanup to a raw _SemLock.'''
    # Shared generator of unique semaphore name suffixes.
    _rand = tempfile._RandomNameSequence()
    def __init__(self, kind, value, maxvalue):
        # unlink_now is only used on win32 or when we are using fork.
        unlink_now = False
        # Retry a bounded number of times: _make_name can collide with a
        # semaphore created concurrently by another process.
        for i in range(100):
            try:
                self._semlock = _SemLock(
                    kind, value, maxvalue, SemLock._make_name(),
                    unlink_now)
            except FileExistsError:  # pragma: no cover
                pass
            else:
                break
        else:  # pragma: no cover
            raise FileExistsError('cannot find name for semaphore')
        util.debug('created semlock with handle %s and name "%s"'
                   % (self._semlock.handle, self._semlock.name))
        self._make_methods()
        def _after_fork(obj):
            obj._semlock._after_fork()
        util.register_after_fork(self, _after_fork)
        # When the object is garbage collected or the
        # process shuts down we unlink the semaphore name
        resource_tracker.register(self._semlock.name, "semlock")
        util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                      exitpriority=0)
    @staticmethod
    def _cleanup(name):
        # Unlink the named semaphore and drop it from the tracker registry.
        sem_unlink(name)
        resource_tracker.unregister(name, "semlock")
    def _make_methods(self):
        # Bind acquire/release directly to the underlying semlock.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release
    def __enter__(self):
        return self._semlock.acquire()
    def __exit__(self, *args):
        return self._semlock.release()
    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        sl = self._semlock
        h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)
    def __setstate__(self, state):
        self._semlock = _SemLock._rebuild(*state)
        util.debug('recreated blocker with handle %r and name "%s"'
                   % (state[0], state[3]))
        self._make_methods()
    @staticmethod
    def _make_name():
        # OSX does not support long names for semaphores
        return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
#
# Semaphore
#
class Semaphore(SemLock):
    '''Counting semaphore with initial value *value*.'''
    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
    def get_value(self):
        # sem_getvalue is not implemented on OSX.
        if sys.platform == 'darwin':
            raise NotImplementedError("OSX does not implement sem_getvalue")
        return self._semlock._get_value()
    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<%s(value=%s)>' % (self.__class__.__name__, value)
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
    '''Semaphore whose value may never exceed its initial *value*.'''
    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, value)
    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<%s(value=%s, maxvalue=%s)>' % \
            (self.__class__.__name__, value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
    """Non-recursive lock built on a binary semaphore."""

    def __init__(self):
        super(Lock, self).__init__(SEMAPHORE, 1, 1)

    def __repr__(self):
        # Best-effort owner description; any failure degrades to 'unknown'.
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                tname = threading.current_thread().name
                if tname != 'MainThread':
                    name = '%s|%s' % (name, tname)
            elif self._semlock._get_value() == 1:
                name = 'None'
            elif self._semlock._count() > 0:
                name = 'SomeOtherThread'
            else:
                name = 'SomeOtherProcess'
        except Exception:
            name = 'unknown'
        return '<%s(owner=%s)>' % (self.__class__.__name__, name)
#
# Recursive lock
#
class RLock(SemLock):
    """Recursive lock built on a recursive mutex."""

    def __init__(self):
        super(RLock, self).__init__(RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        # Best-effort owner/recursion description for debugging.
        try:
            sl = self._semlock
            if sl._is_mine():
                name = process.current_process().name
                tname = threading.current_thread().name
                if tname != 'MainThread':
                    name = '%s|%s' % (name, tname)
                count = sl._count()
            elif sl._get_value() == 1:
                name, count = 'None', 0
            elif sl._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
#
# Condition variable
#
class Condition(object):
    '''Condition variable built from loky semaphores.

    ``_sleeping_count`` counts threads that entered wait(),
    ``_woken_count`` counts threads released by notify*, and
    ``_wait_semaphore`` is what waiters actually block on.
    '''
    def __init__(self, lock=None):
        self._lock = lock or RLock()
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()
    def __getstate__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)
    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()
    def __enter__(self):
        return self._lock.__enter__()
    def __exit__(self, *args):
        return self._lock.__exit__(*args)
    def _make_methods(self):
        # Delegate acquire/release to the underlying lock.
        self.acquire = self._lock.acquire
        self.release = self._lock.release
    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__,
                                 self._lock, num_waiters)
    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
            'must acquire() condition before using wait()'
        # indicate that this thread is going to sleep
        self._sleeping_count.release()
        # release lock (completely, even if acquired recursively)
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()
        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()
            # reacquire lock to the same recursion depth as before
            for i in range(count):
                self._lock.acquire()
    def notify(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)
        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res
        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()  # wake up one sleeper
            self._woken_count.acquire()  # wait for the sleeper to wake
            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)
    def notify_all(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)
        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res
        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()  # wake up one sleeper
            sleepers += 1
        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()  # wait for a sleeper to wake
            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass
    def wait_for(self, predicate, timeout=None):
        # Repeatedly wait until *predicate* is truthy or *timeout* expires;
        # returns the last predicate result (possibly falsy on timeout).
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - _time()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
#
# Event
#
class Event(object):
    # Event implemented with a Condition plus a semaphore used as a binary
    # flag: value 1 means "set", value 0 means "clear".
    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)
    def is_set(self):
        with self._cond:
            # Non-blocking acquire probes the flag; restore it if taken.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
    def set(self):
        with self._cond:
            # Drain then release so the flag value is exactly 1.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()
    def clear(self):
        with self._cond:
            # Drop the flag to 0 if it was set.
            self._flag.acquire(False)
    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)
            # Re-check after waking: the flag may have been cleared again.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

View file

@ -0,0 +1,172 @@
import os
import sys
import time
import errno
import signal
import warnings
import threading
import subprocess
try:
import psutil
except ImportError:
psutil = None
WIN32 = sys.platform == "win32"
def _flag_current_thread_clean_exit():
"""Put a ``_clean_exit`` flag on the current thread"""
thread = threading.current_thread()
thread._clean_exit = True
def recursive_terminate(process, use_psutil=True):
    """Terminate *process* and all of its descendants.

    Uses psutil when available (and allowed via *use_psutil*), otherwise
    falls back to a platform-specific implementation.
    """
    if psutil is None or not use_psutil:
        _recursive_terminate_without_psutil(process)
    else:
        _recursive_terminate_with_psutil(process)
def _recursive_terminate_with_psutil(process, retries=5):
    """Kill the descendants of *process* (deepest first), then terminate it."""
    try:
        descendants = psutil.Process(process.pid).children(recursive=True)
    except psutil.NoSuchProcess:
        return
    # Kill the children in reverse order to avoid killing the parents before
    # the children in cases where there are more processes nested.
    for child_proc in reversed(descendants):
        try:
            child_proc.kill()
        except psutil.NoSuchProcess:
            pass
    process.terminate()
    process.join()
def _recursive_terminate_without_psutil(process):
    """Terminate a process and its descendants.

    Best-effort: when descendant introspection fails, warn and fall back
    to terminating only *process* itself.
    """
    try:
        _recursive_terminate(process.pid)
    except OSError:
        # The original message concatenated "Please" + "install" without a
        # space, producing "Pleaseinstall psutil".
        warnings.warn("Failed to kill subprocesses on this platform. Please "
                      "install psutil: https://github.com/giampaolo/psutil")
        # In case we cannot introspect the children, we fall back to the
        # classic Process.terminate.
    process.terminate()
    process.join()
def _recursive_terminate(pid):
    """Recursively kill the descendants of a process before killing it.

    Raises OSError when a process cannot be killed (other than "no such
    process"), letting the caller warn and retry with a plain terminate.
    """
    if sys.platform == "win32":
        # On windows, the taskkill function with option `/T` terminate a given
        # process pid and its children.
        try:
            subprocess.check_output(
                ["taskkill", "/F", "/T", "/PID", str(pid)],
                stderr=None)
        except subprocess.CalledProcessError as e:
            # In windows, taskkill return 1 for permission denied and 128, 255
            # for no process found.
            if e.returncode not in [1, 128, 255]:
                raise
            elif e.returncode == 1:
                # Try to kill the process without its descendants if taskkill
                # was denied permission. If this fails too, with an error
                # different from process not found, let the top level function
                # raise a warning and retry to kill the process.
                try:
                    os.kill(pid, signal.SIGTERM)
                except OSError as e:
                    if e.errno != errno.ESRCH:
                        raise
    else:
        try:
            children_pids = subprocess.check_output(
                ["pgrep", "-P", str(pid)],
                stderr=None
            )
        except subprocess.CalledProcessError as e:
            # `pgrep` returns 1 when no child process has been found
            if e.returncode == 1:
                children_pids = b''
            else:
                raise
        # Decode the result, split the cpid and remove the trailing line
        children_pids = children_pids.decode().split('\n')[:-1]
        for cpid in children_pids:
            cpid = int(cpid)
            _recursive_terminate(cpid)
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            # if OSError is raised with [Errno 3] no such process, the process
            # is already terminated, else, raise the error and let the top
            # level function raise a warning and retry to kill the process.
            if e.errno != errno.ESRCH:
                raise
def get_exitcodes_terminated_worker(processes):
    """Return a formated string with the exitcodes of terminated workers.

    If necessary, wait (up to .25s) for the system to correctly set the
    exitcode of one terminated worker.
    """
    patience = 5

    def _collect_exitcodes():
        # Snapshot the workers whose exitcode has already been set.
        return [p.exitcode for p in list(processes.values())
                if p.exitcode is not None]

    # There should be at least one terminated worker; if none is visible
    # yet, poll a few times to let the system set the exitcodes.
    exitcodes = _collect_exitcodes()
    while not exitcodes and patience > 0:
        patience -= 1
        exitcodes = _collect_exitcodes()
        time.sleep(.05)
    return _format_exitcodes(exitcodes)
def _format_exitcodes(exitcodes):
    """Format a list of exit code with names of the signals if possible"""
    formatted = [
        "{}({})".format(_get_exitcode_name(code), code)
        for code in exitcodes
        if code is not None
    ]
    return "{" + ", ".join(formatted) + "}"
def _get_exitcode_name(exitcode):
if sys.platform == "win32":
# The exitcode are unreliable on windows (see bpo-31863).
# For this case, return UNKNOWN
return "UNKNOWN"
if exitcode < 0:
try:
import signal
if sys.version_info > (3, 5):
return signal.Signals(-exitcode).name
# construct an inverse lookup table
for v, k in signal.__dict__.items():
if (v.startswith('SIG') and not v.startswith('SIG_') and
k == -exitcode):
return v
except ValueError:
return "UNKNOWN"
elif exitcode != 255:
# The exitcode are unreliable on forkserver were 255 is always returned
# (see bpo-30589). For this case, return UNKNOWN
return "EXIT"
return "UNKNOWN"

View file

@ -0,0 +1,113 @@
import inspect
from functools import partial
try:
from joblib.externals.cloudpickle import dumps, loads
cloudpickle = True
except ImportError:
cloudpickle = False
# Cache mapping an already-seen object to its wrapper (see
# _wrap_objects_when_needed below).
WRAP_CACHE = {}
class CloudpickledObjectWrapper(object):
    """Wrap an object so it is serialized with cloudpickle.

    Attribute access is delegated to the wrapped object so the wrapper can
    be used as a drop-in replacement. ``keep_wrapper`` decides whether a
    pickle round-trip restores the wrapper or the bare object.
    """

    def __init__(self, obj, keep_wrapper=False):
        self._obj = obj
        self._keep_wrapper = keep_wrapper

    def __reduce__(self):
        # Serialize the payload with cloudpickle so that objects defined in
        # __main__ or local scopes survive the transfer.
        _pickled_object = dumps(self._obj)
        if not self._keep_wrapper:
            return loads, (_pickled_object,)

        return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)

    def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
        # previous object.
        if attr not in ['_obj', '_keep_wrapper']:
            return getattr(self._obj, attr)
        # Fix: the original `return getattr(self, attr)` re-entered
        # __getattr__ forever (RecursionError) whenever '_obj' or
        # '_keep_wrapper' was genuinely missing (e.g. during unpickling,
        # before __init__ has run). The __getattr__ protocol expects an
        # AttributeError here.
        raise AttributeError(attr)
# Make sure the wrapped object conserves the callable property
class CallableObjectWrapper(CloudpickledObjectWrapper):
    """Variant of CloudpickledObjectWrapper that stays callable."""

    def __call__(self, *call_args, **call_kwargs):
        # Forward the call straight to the wrapped callable.
        return self._obj(*call_args, **call_kwargs)
def _wrap_non_picklable_objects(obj, keep_wrapper):
    """Pick the wrapper class matching ``obj``'s callability and wrap it."""
    wrapper_cls = (
        CallableObjectWrapper if callable(obj) else CloudpickledObjectWrapper
    )
    return wrapper_cls(obj, keep_wrapper=keep_wrapper)
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    """Unpickling helper: restore the payload and wrap it again."""
    return _wrap_non_picklable_objects(loads(_pickled_object), keep_wrapper)
def _wrap_objects_when_needed(obj):
    """Introspect ``obj`` and wrap it (or its parts) if cloudpickle is
    required to serialize it: objects defined in ``__main__``, nested
    functions and lambdas. Returns ``obj`` unchanged otherwise."""
    if not cloudpickle:
        return obj

    need_wrap = "__main__" in getattr(obj, "__module__", "")
    if isinstance(obj, partial):
        # Rebuild the partial with each component wrapped when needed.
        wrapped_args = [_wrap_objects_when_needed(a) for a in obj.args]
        wrapped_kwargs = {
            k: _wrap_objects_when_needed(v) for k, v in obj.keywords.items()
        }
        return partial(_wrap_objects_when_needed(obj.func),
                       *wrapped_args, **wrapped_kwargs)

    if callable(obj):
        # Need wrap if the object is a function defined in a local scope of
        # another function.
        code = getattr(obj, "__code__", "")
        if getattr(code, "co_flags", 0) & inspect.CO_NESTED:
            need_wrap = True

        # Need wrap if the obj is a lambda expression
        if "<lambda>" in getattr(obj, "__name__", ""):
            need_wrap = True

    if not need_wrap:
        return obj

    wrapped = WRAP_CACHE.get(obj)
    if wrapped is None:
        wrapped = _wrap_non_picklable_objects(obj, keep_wrapper=False)
        WRAP_CACHE[obj] = wrapped
    return wrapped
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.

    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    if not cloudpickle:
        # Fix: the original message read "could not from joblib.externals
        # import cloudpickle. Please install ..." — an import-path rewrite
        # had leaked into the user-facing string.
        raise ImportError("could not import cloudpickle. Please install "
                          "cloudpickle to allow extended serialization. "
                          "(`pip install cloudpickle`).")

    # If obj is a class, create a CloudpickledClassWrapper which instantiates
    # the object internally and wrap it directly in a CloudpickledObjectWrapper
    if inspect.isclass(obj):
        class CloudpickledClassWrapper(CloudpickledObjectWrapper):
            def __init__(self, *args, **kwargs):
                self._obj = obj(*args, **kwargs)
                self._keep_wrapper = keep_wrapper

        CloudpickledClassWrapper.__name__ = obj.__name__
        return CloudpickledClassWrapper

    # If obj is an instance of a class, just wrap it in a regular
    # CloudpickledObjectWrapper
    return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,232 @@
###############################################################################
# Reusable ProcessPoolExecutor
#
# author: Thomas Moreau and Olivier Grisel
#
import time
import warnings
import threading
import multiprocessing as mp
from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
from .backend.context import cpu_count
from .backend import get_context
__all__ = ['get_reusable_executor']
# Python 2 compat helper: ``type("")`` is the native ``str`` type on both
# Python 2 and Python 3, so name it directly.
STRING_TYPE = str
# Singleton executor and id management
_executor_lock = threading.RLock()  # guards singleton creation and id handout
_next_executor_id = 0  # monotonic counter read by _get_next_executor_id
_executor = None  # the current singleton executor instance, if any
_executor_kwargs = None  # kwargs used to build _executor (checked on reuse)
def _get_next_executor_id():
    """Ensure that each successive executor instance has a unique, monotonic id.

    The purpose of this monotonic id is to help debug and test automated
    instance creation.
    """
    global _next_executor_id
    with _executor_lock:
        current_id = _next_executor_id
        _next_executor_id = current_id + 1
        return current_id
def get_reusable_executor(max_workers=None, context=None, timeout=10,
                          kill_workers=False, reuse="auto",
                          job_reducers=None, result_reducers=None,
                          initializer=None, initargs=(), env=None):
    """Return the current ReusableExecutor instance.

    Start a new instance if it has not been started already or if the previous
    instance was left in a broken state.

    If the previous instance does not have the requested number of workers, the
    executor is dynamically resized to adjust the number of workers prior to
    returning.

    Reusing a singleton instance spares the overhead of starting new worker
    processes and importing common python packages each time.

    ``max_workers`` controls the maximum number of tasks that can be running in
    parallel in worker processes. By default this is set to the number of
    CPUs on the host.

    Setting ``timeout`` (in seconds) makes idle workers automatically shutdown
    so as to release system resources. New workers are respawn upon submission
    of new tasks so that ``max_workers`` are available to accept the newly
    submitted tasks. Setting ``timeout`` to around 100 times the time required
    to spawn new processes and import packages in them (on the order of 100ms)
    ensures that the overhead of spawning workers is negligible.

    Setting ``kill_workers=True`` makes it possible to forcibly interrupt
    previously spawned jobs to get a new instance of the reusable executor
    with new constructor argument values.

    The ``job_reducers`` and ``result_reducers`` are used to customize the
    pickling of tasks and results send to the executor.

    When provided, the ``initializer`` is run first in newly spawned
    processes with argument ``initargs``.

    The environment variable in the child process are a copy of the values in
    the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and
    ``VAL`` are string literals to overwrite the environment variable ``ENV``
    in the child processes to value ``VAL``. The environment variables are set
    in the children before any module is loaded. This only works with the
    ``loky`` context and it is unreliable on Windows with Python < 3.6.
    """
    # Delegate to the classmethod managing the module-level singleton; the
    # second returned value (whether the instance was reused) is discarded.
    _executor, _ = _ReusablePoolExecutor.get_reusable_executor(
        max_workers=max_workers, context=context, timeout=timeout,
        kill_workers=kill_workers, reuse=reuse, job_reducers=job_reducers,
        result_reducers=result_reducers, initializer=initializer,
        initargs=initargs, env=env
    )
    return _executor
class _ReusablePoolExecutor(ProcessPoolExecutor):
    """ProcessPoolExecutor meant to be reused as a module-level singleton.

    On top of ``ProcessPoolExecutor`` this class supports in-place resizing
    of the worker pool and serializes ``submit`` against ``_resize`` with a
    shared lock. Instances are created/reused through the
    ``get_reusable_executor`` classmethod below.
    """

    def __init__(self, submit_resize_lock, max_workers=None, context=None,
                 timeout=None, executor_id=0, job_reducers=None,
                 result_reducers=None, initializer=None, initargs=(),
                 env=None):
        super(_ReusablePoolExecutor, self).__init__(
            max_workers=max_workers, context=context, timeout=timeout,
            job_reducers=job_reducers, result_reducers=result_reducers,
            initializer=initializer, initargs=initargs, env=env)
        # Monotonic id (see _get_next_executor_id), used in debug messages.
        self.executor_id = executor_id
        # Shared lock serializing submit() against _resize().
        self._submit_resize_lock = submit_resize_lock

    @classmethod
    def get_reusable_executor(cls, max_workers=None, context=None, timeout=10,
                              kill_workers=False, reuse="auto",
                              job_reducers=None, result_reducers=None,
                              initializer=None, initargs=(), env=None):
        """Return ``(executor, is_reused)`` for the module-level singleton.

        A fresh executor is built when none exists yet, when the previous one
        is broken or shut down, or when ``reuse`` resolves to False. With
        ``reuse="auto"``, the previous instance is reused only if it was
        created with the same keyword arguments.
        """
        with _executor_lock:
            global _executor, _executor_kwargs
            executor = _executor

            if max_workers is None:
                if reuse is True and executor is not None:
                    # Explicit reuse: keep the previous pool size.
                    max_workers = executor._max_workers
                else:
                    max_workers = cpu_count()
            elif max_workers <= 0:
                raise ValueError(
                    "max_workers must be greater than 0, got {}."
                    .format(max_workers))

            if isinstance(context, STRING_TYPE):
                context = get_context(context)
            if context is not None and context.get_start_method() == "fork":
                raise ValueError(
                    "Cannot use reusable executor with the 'fork' context"
                )

            kwargs = dict(context=context, timeout=timeout,
                          job_reducers=job_reducers,
                          result_reducers=result_reducers,
                          initializer=initializer, initargs=initargs,
                          env=env)
            if executor is None:
                is_reused = False
                mp.util.debug("Create a executor with max_workers={}."
                              .format(max_workers))
                executor_id = _get_next_executor_id()
                _executor_kwargs = kwargs
                _executor = executor = cls(
                    _executor_lock, max_workers=max_workers,
                    executor_id=executor_id, **kwargs)
            else:
                if reuse == 'auto':
                    # Reuse only if the constructor arguments are unchanged.
                    reuse = kwargs == _executor_kwargs
                if (executor._flags.broken or executor._flags.shutdown
                        or not reuse):
                    if executor._flags.broken:
                        reason = "broken"
                    elif executor._flags.shutdown:
                        reason = "shutdown"
                    else:
                        reason = "arguments have changed"
                    mp.util.debug(
                        "Creating a new executor with max_workers={} as the "
                        "previous instance cannot be reused ({})."
                        .format(max_workers, reason))
                    executor.shutdown(wait=True, kill_workers=kill_workers)
                    _executor = executor = _executor_kwargs = None
                    # Recursive call to build a new instance
                    return cls.get_reusable_executor(max_workers=max_workers,
                                                     **kwargs)
                else:
                    mp.util.debug(
                        "Reusing existing executor with max_workers={}."
                        .format(executor._max_workers)
                    )
                    is_reused = True
                    # Adjust the pool to the requested number of workers.
                    executor._resize(max_workers)

            return executor, is_reused

    def submit(self, fn, *args, **kwargs):
        # Hold the shared lock so the pool cannot be resized while the job
        # is being enqueued.
        with self._submit_resize_lock:
            return super(_ReusablePoolExecutor, self).submit(
                fn, *args, **kwargs)

    def _resize(self, max_workers):
        """Resize the worker pool in place to ``max_workers`` workers."""
        with self._submit_resize_lock:
            if max_workers is None:
                raise ValueError("Trying to resize with max_workers=None")
            elif max_workers == self._max_workers:
                # Nothing to do: already at the requested size.
                return

            if self._executor_manager_thread is None:
                # If the executor_manager_thread has not been started
                # then no processes have been spawned and we can just
                # update _max_workers and return
                self._max_workers = max_workers
                return

            self._wait_job_completion()

            # Some process might have returned due to timeout so check how many
            # children are still alive. Use the _process_management_lock to
            # ensure that no process are spawned or timeout during the resize.
            with self._processes_management_lock:
                processes = list(self._processes.values())
                nb_children_alive = sum(p.is_alive() for p in processes)
                self._max_workers = max_workers
                # One None sentinel per worker in excess asks it to exit.
                for _ in range(max_workers, nb_children_alive):
                    self._call_queue.put(None)
            while (len(self._processes) > max_workers
                    and not self._flags.broken):
                time.sleep(1e-3)

            # Grow the pool back up to max_workers if needed, then wait for
            # all remaining workers to be alive.
            self._adjust_process_count()
            processes = list(self._processes.values())
            while not all([p.is_alive() for p in processes]):
                time.sleep(1e-3)

    def _wait_job_completion(self):
        """Wait for the cache to be empty before resizing the pool."""
        # Issue a warning to the user about the bad effect of this usage.
        if len(self._pending_work_items) > 0:
            warnings.warn("Trying to resize an executor with running jobs: "
                          "waiting for jobs completion before resizing.",
                          UserWarning)
            mp.util.debug("Executor {} waiting for jobs completion before"
                          " resizing".format(self.executor_id))
        # Wait for the completion of the jobs
        while len(self._pending_work_items) > 0:
            time.sleep(1e-3)

    def _setup_queues(self, job_reducers, result_reducers):
        # As this executor can be resized, use a large queue size to avoid
        # underestimating capacity and introducing overhead
        queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
        super(_ReusablePoolExecutor, self)._setup_queues(
            job_reducers, result_reducers, queue_size=queue_size)