Uploaded Test files

Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions

@@ -0,0 +1,139 @@
"""Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular:
1. transparent disk-caching of functions and lazy re-evaluation
(memoize pattern)
2. easy, simple parallel computing
Joblib is optimized to be **fast** and **robust** on large
data in particular and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
==================== ===============================================
**Documentation:** https://joblib.readthedocs.io
**Download:** https://pypi.python.org/pypi/joblib#downloads
**Source code:** https://github.com/joblib/joblib
**Report issues:** https://github.com/joblib/joblib/issues
==================== ===============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is often rerun again and
again, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results.
* **Persist to disk transparently**: efficiently persisting
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application's status or a computational job, e.g.
after a crash.
Joblib addresses these problems while **leaving your code and your flow
control as unmodified as possible** (no framework, no new paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output values:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from joblib import Memory
>>> cachedir = 'your_cache_dir_goes_here'
>>> mem = Memory(cachedir)
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float64)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[0., 0., 1.],
[1., 1., 1.],
[4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Fast compressed persistence**: a replacement for pickle to work
efficiently on Python objects containing large data
(*joblib.dump* & *joblib.load*).
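For instance (an illustrative sketch; assumes a writable working
directory)::
>>> from joblib import dump, load
>>> dump(a, 'array.joblib')  # doctest: +SKIP
['array.joblib']
>>> b = load('array.joblib')  # doctest: +SKIP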
..
>>> import shutil ; shutil.rmtree(cachedir)
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.0'
import os
from .memory import Memory, MemorizedResult, register_store_backend
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .compressor import register_compressor
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
from .externals.loky import wrap_non_picklable_objects
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend',
'register_store_backend', 'register_compressor',
'wrap_non_picklable_objects']
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")

@@ -0,0 +1,357 @@
from __future__ import print_function, division, absolute_import
import asyncio
import concurrent.futures
import contextlib
import time
from uuid import uuid4
import weakref
from .parallel import AutoBatchingMixin, ParallelBackendBase, BatchedCalls
from .parallel import parallel_backend
try:
import distributed
except ImportError:
distributed = None
if distributed is not None:
from dask.utils import funcname, itemgetter
from dask.sizeof import sizeof
from dask.distributed import (
Client,
as_completed,
get_client,
secede,
rejoin
)
from distributed.utils import thread_state
try:
# asyncio.TimeoutError, Python3-only error thrown by recent versions of
# distributed
from distributed.utils import TimeoutError as _TimeoutError
except ImportError:
from tornado.gen import TimeoutError as _TimeoutError
def is_weakrefable(obj):
try:
weakref.ref(obj)
return True
except TypeError:
return False
class _WeakKeyDictionary:
"""A variant of weakref.WeakKeyDictionary for unhashable objects.
This datastructure is used to store futures for broadcasted data objects
such as large numpy arrays or pandas dataframes that are not hashable and
therefore cannot be used as keys of traditional python dicts.
Furthermore, using a dict with id(array) as key is not safe because
Python is likely to reuse the id of recently collected arrays.
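A minimal usage sketch (illustrative; ``Data`` is a hypothetical list
subclass, weakref-able but unhashable)::
>>> class Data(list):
...     pass
>>> d = _WeakKeyDictionary()
>>> x = Data([1, 2, 3])
>>> d[x] = 'some-value'
>>> d[x]
'some-value'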
"""
def __init__(self):
self._data = {}
def __getitem__(self, obj):
ref, val = self._data[id(obj)]
if ref() is not obj:
# In case of a race condition with on_destroy.
raise KeyError(obj)
return val
def __setitem__(self, obj, value):
key = id(obj)
try:
ref, _ = self._data[key]
if ref() is not obj:
# In case of race condition with on_destroy.
raise KeyError(obj)
except KeyError:
# Insert the new entry in the mapping along with a weakref
# callback to automatically delete the entry from the mapping
# as soon as the object used as key is garbage collected.
def on_destroy(_):
del self._data[key]
ref = weakref.ref(obj, on_destroy)
self._data[key] = ref, value
def __len__(self):
return len(self._data)
def clear(self):
self._data.clear()
def _funcname(x):
try:
if isinstance(x, list):
x = x[0][0]
except Exception:
pass
return funcname(x)
def _make_tasks_summary(tasks):
"""Summarize of list of (func, args, kwargs) function calls"""
unique_funcs = {func for func, args, kwargs in tasks}
mixed = len(unique_funcs) > 1
return len(tasks), mixed, _funcname(tasks)
class Batch:
"""dask-compatible wrapper that executes a batch of tasks"""
def __init__(self, tasks):
# collect some metadata from the tasks to ease Batch calls
# introspection when debugging
self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(
tasks
)
def __call__(self, tasks=None):
results = []
with parallel_backend('dask'):
for func, args, kwargs in tasks:
results.append(func(*args, **kwargs))
return results
def __repr__(self):
descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
if self._mixed:
descr = "mixed_" + descr
return descr
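# For instance (illustrative), a Batch wrapping three calls to the same
# function ``sqrt`` is represented as "batch_of_sqrt_3_calls", while a batch
# mixing different functions yields "mixed_batch_of_<first_funcname>_<n>_calls".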
def _joblib_probe_task():
# Noop used by the joblib connector to probe when workers are ready.
pass
class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
MIN_IDEAL_BATCH_DURATION = 0.2
MAX_IDEAL_BATCH_DURATION = 1.0
supports_timeout = True
def __init__(self, scheduler_host=None, scatter=None,
client=None, loop=None, wait_for_workers_timeout=10,
**submit_kwargs):
super().__init__()
if distributed is None:
msg = ("You are trying to use 'dask' as a joblib parallel backend "
"but dask is not installed. Please install dask "
"to fix this error.")
raise ValueError(msg)
if client is None:
if scheduler_host:
client = Client(scheduler_host, loop=loop,
set_as_default=False)
else:
try:
client = get_client()
except ValueError as e:
msg = ("To use Joblib with Dask first create a Dask Client"
"\n\n"
" from dask.distributed import Client\n"
" client = Client()\n"
"or\n"
" client = Client('scheduler-address:8786')")
raise ValueError(msg) from e
self.client = client
if scatter is not None and not isinstance(scatter, (list, tuple)):
raise TypeError("scatter must be a list/tuple, got "
"`%s`" % type(scatter).__name__)
if scatter is not None and len(scatter) > 0:
# Keep a reference to the scattered data to keep the ids the same
self._scatter = list(scatter)
scattered = self.client.scatter(scatter, broadcast=True)
self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
else:
self._scatter = []
self.data_futures = {}
self.wait_for_workers_timeout = wait_for_workers_timeout
self.submit_kwargs = submit_kwargs
self.waiting_futures = as_completed(
[],
loop=client.loop,
with_results=True,
raise_errors=False
)
self._results = {}
self._callbacks = {}
async def _collect(self):
while self._continue:
async for future, result in self.waiting_futures:
cf_future = self._results.pop(future)
callback = self._callbacks.pop(future)
if future.status == "error":
typ, exc, tb = result
cf_future.set_exception(exc)
else:
cf_future.set_result(result)
callback(result)
await asyncio.sleep(0.01)
def __reduce__(self):
return (DaskDistributedBackend, ())
def get_nested_backend(self):
return DaskDistributedBackend(client=self.client), -1
def configure(self, n_jobs=1, parallel=None, **backend_args):
self.parallel = parallel
return self.effective_n_jobs(n_jobs)
def start_call(self):
self._continue = True
self.client.loop.add_callback(self._collect)
self.call_data_futures = _WeakKeyDictionary()
def stop_call(self):
# The explicit call to clear is required to break a cyclic reference
# to the futures.
self._continue = False
# wait for the future collection routine (self._backend._collect) to
# finish in order to limit asyncio warnings due to aborting _collect
# during a following backend termination call
time.sleep(0.01)
self.call_data_futures.clear()
def effective_n_jobs(self, n_jobs):
effective_n_jobs = sum(self.client.ncores().values())
if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
return effective_n_jobs
# If there is no worker, schedule a probe task to wait for the workers
# to come up and be available. If the dask cluster is in adaptive mode,
# this task might cause the cluster to provision some workers.
try:
self.client.submit(_joblib_probe_task).result(
timeout=self.wait_for_workers_timeout)
except _TimeoutError as e:
error_msg = (
"DaskDistributedBackend has no worker after {} seconds. "
"Make sure that workers are started and can properly connect "
"to the scheduler and increase the joblib/dask connection "
"timeout with:\n\n"
"parallel_backend('dask', wait_for_workers_timeout={})"
).format(self.wait_for_workers_timeout,
max(10, 2 * self.wait_for_workers_timeout))
raise TimeoutError(error_msg) from e
return sum(self.client.ncores().values())
async def _to_func_args(self, func):
itemgetters = dict()
# Futures that are dynamically generated during a single call to
# Parallel.__call__.
call_data_futures = getattr(self, 'call_data_futures', None)
async def maybe_to_futures(args):
out = []
for arg in args:
arg_id = id(arg)
if arg_id in itemgetters:
out.append(itemgetters[arg_id])
continue
f = self.data_futures.get(arg_id, None)
if f is None and call_data_futures is not None:
try:
f = call_data_futures[arg]
except KeyError:
pass
if f is None:
if is_weakrefable(arg) and sizeof(arg) > 1e3:
# Automatically scatter large objects to some of
# the workers to avoid duplicated data transfers.
# Rely on automated inter-worker data stealing if
# more workers need to reuse this data
# concurrently.
# set hash=False - nested scatter calls (i.e.
# calling client.scatter inside a dask worker)
# using hash=True often raise CancelledError,
# see dask/distributed#3703
[f] = await self.client.scatter(
[arg],
asynchronous=True,
hash=False
)
call_data_futures[arg] = f
if f is not None:
out.append(f)
else:
out.append(arg)
return out
tasks = []
for f, args, kwargs in func.items:
args = list(await maybe_to_futures(args))
kwargs = dict(zip(kwargs.keys(),
await maybe_to_futures(kwargs.values())))
tasks.append((f, args, kwargs))
return (Batch(tasks), tasks)
def apply_async(self, func, callback=None):
cf_future = concurrent.futures.Future()
cf_future.get = cf_future.result  # mimic the AsyncResult API
async def f(func, callback):
batch, tasks = await self._to_func_args(func)
key = f'{repr(batch)}-{uuid4().hex}'
dask_future = self.client.submit(
batch, tasks=tasks, key=key, **self.submit_kwargs
)
self.waiting_futures.add(dask_future)
self._callbacks[dask_future] = callback
self._results[dask_future] = cf_future
self.client.loop.add_callback(f, func, callback)
return cf_future
def abort_everything(self, ensure_ready=True):
""" Tell the client to cancel any task submitted via this instance
joblib.Parallel will never access those results
"""
with self.waiting_futures.lock:
self.waiting_futures.futures.clear()
while not self.waiting_futures.queue.empty():
self.waiting_futures.queue.get()
@contextlib.contextmanager
def retrieval_context(self):
"""Override ParallelBackendBase.retrieval_context to avoid deadlocks.
This removes the current thread from the worker's thread pool (using 'secede').
Seceding avoids deadlock in nested parallelism settings.
"""
# See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
# this is used.
if hasattr(thread_state, 'execution_state'):
# we are in a worker. Secede to avoid deadlock.
secede()
yield
if hasattr(thread_state, 'execution_state'):
rejoin()

@@ -0,0 +1,397 @@
"""
Represent an exception with a lot of information.
Provides 2 useful functions:
format_exc: format an exception into a complete traceback, with full
debugging information.
format_outer_frames: format the current position in the call stack.
Adapted from IPython's VerboseTB.
"""
# Authors: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Nathaniel Gray <n8gray@caltech.edu>
# Fernando Perez <fperez@colorado.edu>
# Copyright: 2010, Gael Varoquaux
# 2001-2004, Fernando Perez
# 2001 Nathaniel Gray
# License: BSD 3 clause
# flake8: noqa
import inspect
import keyword
import linecache
import os
import pydoc
import sys
import time
import tokenize
import traceback
INDENT = ' ' * 8
###############################################################################
# some internal-use functions
def safe_repr(value):
"""Hopefully pretty robust repr equivalent."""
# this is pretty horrible but should always return *something*
try:
return pydoc.text.repr(value)
except KeyboardInterrupt:
raise
except:
try:
return repr(value)
except KeyboardInterrupt:
raise
except:
try:
# all still in an except block so we catch
# getattr raising
name = getattr(value, '__name__', None)
if name:
# ick, recursion
return safe_repr(name)
klass = getattr(value, '__class__', None)
if klass:
return '%s instance' % safe_repr(klass)
except KeyboardInterrupt:
raise
except:
return 'UNRECOVERABLE REPR FAILURE'
def eq_repr(value, repr=safe_repr):
return '=%s' % repr(value)
###############################################################################
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
but maintaining the order in which they first appear.
A naive solution to this problem which just makes a dictionary with the
elements as keys fails to respect the stability condition (before
Python 3.7, dictionaries did not preserve insertion order).
Note: All elements in the input must be hashable.
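For example (illustrative doctest)::
>>> uniq_stable([1, 2, 1, 3, 2])
[1, 2, 3]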
"""
unique = []
unique_set = set()
for nn in elems:
if nn not in unique_set:
unique.append(nn)
unique_set.add(nn)
return unique
###############################################################################
def fix_frame_records_filenames(records):
"""Try to fix the filenames in each record from inspect.getinnerframes().
Particularly, modules loaded from within zip files have useless filenames
attached to their code object, and inspect.getinnerframes() just uses it.
"""
fixed_records = []
for frame, filename, line_no, func_name, lines, index in records:
# Look inside the frame's globals dictionary for __file__, which should
# be better.
better_fn = frame.f_globals.get('__file__', None)
if isinstance(better_fn, str):
# Check the type just in case someone did something weird with
# __file__. It might also be None if the error occurred during
# import.
filename = better_fn
fixed_records.append((frame, filename, line_no, func_name, lines,
index))
return fixed_records
def _fixed_getframes(etb, context=1, tb_offset=0):
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
# If the error is at the console, don't build any context, since it would
# otherwise produce 5 blank lines printed out (there is no file at the
# console)
rec_check = records[tb_offset:]
try:
rname = rec_check[0][1]
if rname == '<ipython console>' or rname.endswith('<string>'):
return rec_check
except IndexError:
pass
aux = traceback.extract_tb(etb)
assert len(records) == len(aux)
for i, (file, lnum, _, _) in enumerate(aux):
maybe_start = lnum - 1 - context // 2
start = max(maybe_start, 0)
end = start + context
lines = linecache.getlines(file)[start:end]
buf = list(records[i])
buf[LNUM_POS] = lnum
buf[INDEX_POS] = lnum - 1 - start
buf[LINES_POS] = lines
records[i] = tuple(buf)
return records[tb_offset:]
def _format_traceback_lines(lnum, index, lines, lvals=None):
numbers_width = 7
res = []
i = lnum - index
for line in lines:
if i == lnum:
# This is the line with the error
pad = numbers_width - len(str(i))
if pad >= 3:
marker = '-' * (pad - 3) + '-> '
elif pad == 2:
marker = '> '
elif pad == 1:
marker = '>'
else:
marker = ''
num = marker + str(i)
else:
num = '%*s' % (numbers_width, i)
line = '%s %s' % (num, line)
res.append(line)
if lvals and i == lnum:
res.append(lvals + '\n')
i = i + 1
return res
def format_records(records): # , print_globals=False):
# Loop over all records printing context and info
frames = []
abspath = os.path.abspath
for frame, file, lnum, func, lines, index in records:
try:
file = file and abspath(file) or '?'
except OSError:
# if file is '<console>' or something not in the filesystem,
# the abspath call will throw an OSError. Just ignore it and
# keep the original file string.
pass
if file.endswith('.pyc'):
file = file[:-4] + '.py'
link = file
args, varargs, varkw, locals = inspect.getargvalues(frame)
if func == '?':
call = ''
else:
# Decide whether to include variable details or not
try:
call = 'in %s%s' % (func, inspect.formatargvalues(args,
varargs, varkw, locals,
formatvalue=eq_repr))
except KeyError:
# Very odd crash from inspect.formatargvalues(). The
# scenario under which it appeared was a call to
# view(array,scale) in NumTut.view.view(), where scale had
# been defined as a scalar (it should be a tuple). Somehow
# inspect messes up resolving the argument list of view()
# and barfs out. At some point I should dig into this one
# and file a bug report about it.
print("\nJoblib's exception reporting continues...\n")
call = 'in %s(***failed resolving arguments***)' % func
# Initialize a list of names on the current line, which the
# tokenizer below will populate.
names = []
def tokeneater(token_type, token, start, end, line):
"""Stateful tokeneater which builds dotted names.
The list of names it appends to (from the enclosing scope) can
contain repeated composite names. This is unavoidable, since
there is no way to disambiguate partial dotted structures until
the full list is known. The caller is responsible for pruning
the final list of duplicates before using it."""
# build composite names
if token == '.':
try:
names[-1] += '.'
# store state so the next token is added for x.y.z names
tokeneater.name_cont = True
return
except IndexError:
pass
if token_type == tokenize.NAME and token not in keyword.kwlist:
if tokeneater.name_cont:
# Dotted names
names[-1] += token
tokeneater.name_cont = False
else:
# Regular new names. We append everything, the caller
# will be responsible for pruning the list later. It's
# very tricky to try to prune as we go, b/c composite
# names can fool us. The pruning at the end is easy
# to do (or the caller can print a list with repeated
# names if so desired.)
names.append(token)
elif token_type == tokenize.NEWLINE:
raise IndexError
# we need to store a bit of state in the tokenizer to build
# dotted names
tokeneater.name_cont = False
def linereader(file=file, lnum=[lnum], getline=linecache.getline):
line = getline(file, lnum[0])
lnum[0] += 1
return line
# Build the list of names on this line of code where the exception
# occurred.
try:
# This builds the names list in-place by capturing it from the
# enclosing scope.
for token in tokenize.generate_tokens(linereader):
tokeneater(*token)
except (IndexError, UnicodeDecodeError, SyntaxError):
# signals exit of tokenizer
# SyntaxError can happen when trying to tokenize
# a compiled (e.g. .so or .pyd) extension
pass
except tokenize.TokenError as msg:
_m = ("An unexpected error occurred while tokenizing input file %s\n"
"The following traceback may be corrupted or invalid\n"
"The error message is: %s\n" % (file, msg))
print(_m)
# prune names list of duplicates, but keep the right order
unique_names = uniq_stable(names)
# Start loop over vars
lvals = []
for name_full in unique_names:
name_base = name_full.split('.', 1)[0]
if name_base in frame.f_code.co_varnames:
if name_base in locals.keys():
try:
value = safe_repr(eval(name_full, locals))
except:
value = "undefined"
else:
value = "undefined"
name = name_full
lvals.append('%s = %s' % (name, value))
#elif print_globals:
# if frame.f_globals.has_key(name_base):
# try:
# value = safe_repr(eval(name_full,frame.f_globals))
# except:
# value = "undefined"
# else:
# value = "undefined"
# name = 'global %s' % name_full
# lvals.append('%s = %s' % (name,value))
if lvals:
lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
else:
lvals = ''
level = '%s\n%s %s\n' % (75 * '.', link, call)
if index is None:
frames.append(level)
else:
frames.append('%s%s' % (level, ''.join(
_format_traceback_lines(lnum, index, lines, lvals))))
return frames
###############################################################################
def format_exc(etype, evalue, etb, context=5, tb_offset=0):
""" Return a nice text document describing the traceback.
Parameters
----------
etype, evalue, etb: as returned by sys.exc_info
context: number of lines of the source file to display
tb_offset: the number of stack frames to skip (0 = use all)
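Example (illustrative)::
>>> try:
...     1 / 0
... except Exception:
...     text = format_exc(*sys.exc_info())  # doctest: +SKIP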
"""
# some locals
try:
etype = etype.__name__
except AttributeError:
pass
# Header with the exception type, python version, and date
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
pid = 'PID: %i' % os.getpid()
head = '%s%s%s\n%s%s%s' % (
etype, ' ' * (75 - len(str(etype)) - len(date)),
date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),
pyver)
# Drop topmost frames if requested
records = _fixed_getframes(etb, context, tb_offset)
# Get (safely) a string form of the exception info
try:
etype_str, evalue_str = map(str, (etype, evalue))
except BaseException:
# User exception is improperly defined.
etype, evalue = str, sys.exc_info()[:2]
etype_str, evalue_str = map(str, (etype, evalue))
# ... and format it
exception = ['%s: %s' % (etype_str, evalue_str)]
frames = format_records(records)
return '%s\n%s\n%s' % (head, '\n'.join(frames), ''.join(exception[0]))
###############################################################################
def format_outer_frames(context=5, stack_start=None, stack_end=None,
ignore_ipython=True):
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = inspect.getouterframes(inspect.currentframe())
output = list()
for i, (frame, filename, line_no, func_name, lines, index) \
in enumerate(records):
# Look inside the frame's globals dictionary for __file__, which should
# be better.
better_fn = frame.f_globals.get('__file__', None)
if isinstance(better_fn, str):
# Check the type just in case someone did something weird with
# __file__. It might also be None if the error occurred during
# import.
filename = better_fn
if filename.endswith('.pyc'):
filename = filename[:-4] + '.py'
if ignore_ipython:
# Hack to avoid printing the internals of IPython
if (os.path.basename(filename) in ('iplib.py', 'py3compat.py')
and func_name in ('execfile', 'safe_execfile', 'runcode')):
break
maybe_start = line_no - 1 - context // 2
start = max(maybe_start, 0)
end = start + context
lines = linecache.getlines(filename)[start:end]
buf = list(records[i])
buf[LNUM_POS] = line_no
buf[INDEX_POS] = line_no - 1 - start
buf[LINES_POS] = lines
output.append(tuple(buf))
return '\n'.join(format_records(output[stack_end:stack_start:-1]))

@@ -0,0 +1,115 @@
"""
Exceptions
This module is deprecated and will be removed in joblib 0.16.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
class JoblibException(Exception):
"""A simple exception with an error message that you can get to."""
def __init__(self, *args):
# We need to implement __init__ so that it is picked up in the
# multiple inheritance hierarchy in the class created in
# _mk_exception. Note: in Python 2, if you implement __init__
# in your exception class you need to set .args correctly,
# otherwise you can dump an exception instance with pickle but
# not load it (at load time an empty .args will be passed to
# the constructor). Also we want to be explicit and not use
# 'super' here. Using 'super' can cause a sibling class method
# to be called and we have no control over the sibling class method's
# constructor signature in the exception returned by
# _mk_exception.
Exception.__init__(self, *args)
def __repr__(self):
if hasattr(self, 'args') and len(self.args) > 0:
message = self.args[0]
else:
message = ''
name = self.__class__.__name__
return '%s\n%s\n%s\n%s' % (name, 75 * '_', message, 75 * '_')
__str__ = __repr__
class TransportableException(JoblibException):
"""An exception containing all the info to wrap an original
exception and recreate it.
"""
def __init__(self, message, etype):
# The next line sets the .args correctly. This is needed to
# make the exception loadable with pickle
JoblibException.__init__(self, message, etype)
self.message = message
self.etype = etype
def unwrap(self, context_message=""):
report = """\
%s
---------------------------------------------------------------------------
Joblib worker traceback:
---------------------------------------------------------------------------
%s""" % (context_message, self.message)
# Unwrap the exception to a JoblibException
exception_type = _mk_exception(self.etype)[0]
return exception_type(report)
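# Illustrative round-trip (sketch): a worker wraps its formatted traceback
# as ``TransportableException(formatted_tb, ValueError)`` and the parent
# re-raises it locally with
# ``raise exc.unwrap(context_message="while fetching results")``,
# which yields a JoblibValueError embedding the remote traceback.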
_exception_mapping = dict()
def _mk_exception(exception, name=None):
if issubclass(exception, JoblibException):
# No need to recursively wrap a JoblibException
return exception, exception.__name__
# Create an exception inheriting from both JoblibException
# and that exception
if name is None:
name = exception.__name__
this_name = 'Joblib%s' % name
if this_name in _exception_mapping:
# Avoid creating the same exception twice
this_exception = _exception_mapping[this_name]
else:
if exception is Exception:
# JoblibException is already a subclass of Exception. No
# need to use multiple inheritance
return JoblibException, this_name
try:
this_exception = type(
this_name, (JoblibException, exception), {})
_exception_mapping[this_name] = this_exception
except TypeError:
# This happens if "Cannot create a consistent method
# resolution order", e.g. because 'exception' is a
# subclass of JoblibException or 'exception' is not an
# acceptable base class
this_exception = JoblibException
return this_exception, this_name
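# For example (illustrative sketch):
#
#     exc_type, exc_name = _mk_exception(ValueError)
#     assert exc_name == 'JoblibValueError'
#     assert issubclass(exc_type, JoblibException)
#     assert issubclass(exc_type, ValueError)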
def _mk_common_exceptions():
namespace = dict()
import builtins as _builtin_exceptions
common_exceptions = filter(
lambda x: x.endswith('Error'),
dir(_builtin_exceptions))
for name in common_exceptions:
obj = getattr(_builtin_exceptions, name)
if isinstance(obj, type) and issubclass(obj, BaseException):
this_obj, this_name = _mk_exception(obj, name=name)
namespace[this_name] = this_obj
return namespace
# Updating module locals so that the exceptions pickle right. AFAIK this
# works only at module-creation time
locals().update(_mk_common_exceptions())

@@ -0,0 +1,664 @@
"""
Reducer using memory mapping for numpy arrays
"""
# Author: Thomas Moreau <thomas.moreau.2010@gmail.com>
# Copyright: 2017, Thomas Moreau
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import threading
import atexit
import tempfile
import time
import warnings
import weakref
from uuid import uuid4
from multiprocessing import util
from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
try:
WindowsError
except NameError:
WindowsError = type(None)
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import dump, load, load_temporary_memmap
from .backports import make_memmap
from .disk import delete_folder
from .externals.loky.backend import resource_tracker
# Some systems have a ramdisk mounted by default; we can use it instead of
# /tmp as the default folder to dump big arrays to share with subprocesses.
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using
# it as the default folder to dump big arrays to share with subprocesses.
SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9)
# Folder and file permissions to chmod temporary files generated by the
# memmapping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
# Set used in joblib workers, referencing the filenames of temporary memmaps
# created by joblib to speed up data communication. In child processes, we add
# a finalizer to these memmaps that sends a maybe_unlink call to the
# resource_tracker, in order to free main memory as fast as possible.
JOBLIB_MMAPS = set()
def _log_and_unlink(filename):
from .externals.loky.backend.resource_tracker import _resource_tracker
util.debug(
"[FINALIZER CALL] object mapping to {} about to be deleted,"
" decrementing the refcount of the file (pid: {})".format(
os.path.basename(filename), os.getpid()))
_resource_tracker.maybe_unlink(filename, "file")
def add_maybe_unlink_finalizer(memmap):
util.debug(
"[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})"
"".format(type(memmap), id(memmap), os.path.basename(memmap.filename),
os.getpid()))
weakref.finalize(memmap, _log_and_unlink, memmap.filename)
def unlink_file(filename):
"""Wrapper around os.unlink with a retry mechanism.
The retry mechanism has been implemented primarily to overcome a race
condition happening during the finalizer of a np.memmap: when a process
holding the last reference to a mmap-backed np.memmap/np.array is about to
delete this array (and close the reference), it sends a maybe_unlink
request to the resource_tracker. This request can be processed faster than
it takes for the last reference of the memmap to be closed, yielding (on
Windows) a PermissionError in the resource_tracker loop.
"""
NUM_RETRIES = 10
for retry_no in range(1, NUM_RETRIES + 1):
try:
os.unlink(filename)
break
except PermissionError:
util.debug(
'[ResourceTracker] tried to unlink {}, got '
'PermissionError'.format(filename)
)
if retry_no == NUM_RETRIES:
raise
else:
time.sleep(.2)
resource_tracker._CLEANUP_FUNCS['file'] = unlink_file
class _WeakArrayKeyMap:
"""A variant of weakref.WeakKeyDictionary for unhashable numpy arrays.
This datastructure will be used with numpy arrays as obj keys, therefore we
do not implement __getitem__ / __setitem__, to avoid any conflict with
numpy's fancy indexing syntax.
"""
def __init__(self):
self._data = {}
def get(self, obj):
ref, val = self._data[id(obj)]
if ref() is not obj:
# In case of race condition with on_destroy: could never be
# triggered by the joblib tests with CPython.
raise KeyError(obj)
return val
def set(self, obj, value):
key = id(obj)
try:
ref, _ = self._data[key]
if ref() is not obj:
# In case of race condition with on_destroy: could never be
# triggered by the joblib tests with CPython.
raise KeyError(obj)
except KeyError:
# Insert the new entry in the mapping along with a weakref
# callback to automatically delete the entry from the mapping
# as soon as the object used as key is garbage collected.
def on_destroy(_):
del self._data[key]
ref = weakref.ref(obj, on_destroy)
self._data[key] = ref, value
def __getstate__(self):
raise PicklingError("_WeakArrayKeyMap is not pickleable")
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any."""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
# neither a nor its ancestors have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def _get_temp_dir(pool_folder_name, temp_folder=None):
"""Get the full path to a subfolder inside the temporary folder.
Parameters
----------
pool_folder_name : str
Sub-folder name used for the serialization of a pool instance.
temp_folder: str, optional
Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Returns
-------
pool_folder : str
full path to the temporary folder
use_shared_mem : bool
whether the temporary folder is written to the system shared memory
folder or some other temporary folder.
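Example (illustrative; the result is platform dependent)::
>>> _get_temp_dir('joblib_memmapping_folder_1234')  # doctest: +SKIP
('/dev/shm/joblib_memmapping_folder_1234', True)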
"""
use_shared_mem = False
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS)
available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail
if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE:
# Try to see if we have write access to the shared mem
# folder only if it is reasonably large (that is 2GB or
# more).
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except (IOError, OSError):
# Missing rights in the /dev/shm partition, fallback to regular
# temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
return pool_folder, use_shared_mem
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not."""
return _get_backing_memmap(a) is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len, unlink_on_gc_collect):
"""Reconstruct an array view on a memory mapped file."""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return make_memmap(
filename, dtype=dtype, shape=shape, mode=mode, offset=offset,
order=order, unlink_on_gc_collect=unlink_on_gc_collect
)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = make_memmap(
filename, dtype=dtype, shape=total_buffer_len, offset=offset,
mode=mode, order=order, unlink_on_gc_collect=unlink_on_gc_collect
)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
"""Pickling reduction for memmap backed arrays.
a is expected to be an instance of np.ndarray (or np.memmap).
m is expected to be an instance of np.memmap on the top of the ``base``
attribute ancestry of a. ``m.base`` should be the real python mmap object.
"""
# offset that comes from the striding differences between a and m
util.debug('[MEMMAP REDUCE] reducing a memmap-backed array '
'(shape, {}, pid: {})'.format(a.shape, os.getpid()))
a_start, a_end = np.byte_bounds(a)
m_start = np.byte_bounds(m)[0]
offset = a_start - m_start
# offset from the backing memmap
offset += m.offset
if m.flags['F_CONTIGUOUS']:
order = 'F'
else:
# The backing memmap buffer is necessarily contiguous hence C if not
# Fortran
order = 'C'
if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
# If the array is a contiguous view, no need to pass the strides
strides = None
total_buffer_len = None
else:
# Compute the total number of items to map from which the strided
# view will be extracted.
strides = a.strides
total_buffer_len = (a_end - a_start) // a.itemsize
return (_strided_from_memmap,
(m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
total_buffer_len, False))
def reduce_array_memmap_backward(a):
"""reduce a np.array or a np.memmap from a child process"""
m = _get_backing_memmap(a)
if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS:
# if a is backed by a memmaped file, reconstruct a using the
# memmaped file.
return _reduce_memmap_backed(a, m)
else:
# a is either a regular (not memmap-backed) numpy array, or an array
# backed by a shared temporary file created by joblib. In the latter
# case, in order to limit the lifespan of these temporary files, we
# serialize the memmap as a regular numpy array, and decref the
# file backing the memmap (done implicitly in a previously registered
# finalizer, see ``unlink_on_gc_collect`` for more details)
return (
loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), )
)
class ArrayMemmapForwardReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
Threshold to trigger memmapping of large arrays to files created in
a folder.
temp_folder_resolver: callable
A callable in charge of resolving a temporary folder name where files
for backing memmapped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
prewarm: bool, optional, True by default.
Force a read on newly memmapped arrays to make sure the OS pre-caches
them in memory. This can be useful to avoid concurrent disk access when
the same data array is passed to different worker processes.
"""
def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode,
unlink_on_gc_collect, verbose=0, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder_resolver = temp_folder_resolver
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
if prewarm == "auto":
self._prewarm = not self._temp_folder.startswith(
SYSTEM_SHARED_MEM_FS
)
else:
self._prewarm = prewarm
self._memmaped_arrays = _WeakArrayKeyMap()
self._temporary_memmaped_filenames = set()
self._unlink_on_gc_collect = unlink_on_gc_collect
@property
def _temp_folder(self):
return self._temp_folder_resolver()
def __reduce__(self):
# The ArrayMemmapForwardReducer is passed to the children processes: it
# needs to be pickled but the _WeakArrayKeyMap needs to be skipped as
# it's only guaranteed to be consistent with the parent process memory
# garbage collection.
# Although this reducer is pickled, it is not needed in its destination
# process (child processes), as we only use this reducer to send
# memmaps from the parent process to the children processes. For this
# reason, we can afford to skip the resolver (which would otherwise
# be unpicklable) and pass None instead.
args = (self._max_nbytes, None, self._mmap_mode,
self._unlink_on_gc_collect)
kwargs = {
'verbose': self.verbose,
'prewarm': self._prewarm,
}
return ArrayMemmapForwardReducer, args, kwargs
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None and isinstance(m, np.memmap):
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject and self._max_nbytes is not None and
a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
try:
basename = self._memmaped_arrays.get(a)
except KeyError:
# Generate a new unique random filename. The process and thread
# ids are only useful for debugging purposes and to make it
# easier to clean up orphaned files in case of hard process
# kill (e.g. by "kill -9" or segfault).
basename = "{}-{}-{}.pkl".format(
os.getpid(), id(threading.current_thread()), uuid4().hex)
self._memmaped_arrays.set(a, basename)
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
is_new_memmap = filename not in self._temporary_memmaped_filenames
# add the memmap to the list of temporary memmaps created by joblib
self._temporary_memmaped_filenames.add(filename)
if self._unlink_on_gc_collect:
# Bump reference count of the memmap by 1 to account for
# shared usage of the memmap by a child process. The
# corresponding decref call will be executed upon calling
# resource_tracker.maybe_unlink, registered as a finalizer in
# the child.
# the incref/decref calls here are only possible when the child
# and the parent share the same resource_tracker. This is not the
# case for the multiprocessing backend, but it does not matter
# because unlinking a memmap from a child process is only
# useful to control the memory usage of long-lasting child
# processes, while the multiprocessing-based pools terminate
# their workers at the end of a map() call.
resource_tracker.register(filename, "file")
if is_new_memmap:
# Incref each temporary memmap created by joblib one extra
# time. This means that these memmaps will only be deleted
# once an extra maybe_unlink() is called, which is done once
# all the jobs have completed (or been canceled) in the
# Parallel._terminate_backend() method.
resource_tracker.register(filename, "file")
if not os.path.exists(filename):
util.debug(
"[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
"creating a new memmap at {}".format(
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data by accessing it. This operation ensures
# that the disk access required to create the memmapping
# file are performed in the reducing process and avoids
# concurrent memmap creation in multiple children
# processes.
load(filename, mmap_mode=self._mmap_mode).max()
else:
util.debug(
"[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
"reusing memmap file: {}".format(
a.shape, a.dtype, os.path.basename(filename)))
# The worker process will use joblib.load to memmap the data
return (
(load_temporary_memmap, (filename, self._mmap_mode,
self._unlink_on_gc_collect))
)
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
util.debug(
'[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, '
' dtype={}).'.format(a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
def get_memmapping_reducers(
forward_reducers=None, backward_reducers=None,
temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0,
prewarm=False, unlink_on_gc_collect=True, **kwargs):
"""Construct a pair of memmapping reducer linked to a tmpdir.
This function manage the creation and the clean up of the temporary folders
underlying the memory maps and should be use to get the reducers necessary
to construct joblib pool or executor.
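A minimal usage sketch (illustrative; the resolver is a stand-in)::
>>> fwd, bwd = get_memmapping_reducers(
...     temp_folder_resolver=lambda: '/tmp/joblib_example')  # doctest: +SKIP
>>> np.ndarray in fwd and np.ndarray in bwd  # doctest: +SKIP
True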
"""
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
if np is not None:
# Register smart numpy.ndarray reducers that detect memmap-backed
# arrays and that are also able to dump large in-memory arrays over
# the max_nbytes threshold to memmap files
forward_reduce_ndarray = ArrayMemmapForwardReducer(
max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect,
verbose, prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = forward_reduce_ndarray
# Communication from the child process to the parent process always
# pickles in-memory numpy.ndarray objects without dumping them as memmap
# to avoid confusing the caller and making it tricky to collect the
# temporary folder
backward_reducers[np.ndarray] = reduce_array_memmap_backward
backward_reducers[np.memmap] = reduce_array_memmap_backward
return forward_reducers, backward_reducers
class TemporaryResourcesManager(object):
"""Stateful object able to manage temporary folder and pickles
It exposes:
- a per-context folder name resolving API that memmap-based reducers will
rely on to know where to pickle the temporary memmaps
- a temporary file/folder management API that internally uses the
resource_tracker.
"""
def __init__(self, temp_folder_root=None, context_id=None):
self._current_temp_folder = None
self._temp_folder_root = temp_folder_root
self._use_shared_mem = None
self._cached_temp_folders = dict()
self._id = uuid4().hex
self._finalizers = {}
if context_id is None:
# It would be safer not to assign a default context id (fewer silent
# bugs), but doing this while maintaining backward compatibility
# with the previous, context-unaware version of
# get_memmapping_executor exposes too many low-level details.
context_id = uuid4().hex
self.set_current_context(context_id)
def set_current_context(self, context_id):
self._current_context_id = context_id
self.register_new_context(context_id)
def register_new_context(self, context_id):
# Prepare a sub-folder name specific to a context (usually a unique id
# generated by each instance of the Parallel class). Do not create it in
# advance, to spare FS write access if no array is to be dumped.
if context_id in self._cached_temp_folders:
return
else:
# During its lifecycle, one Parallel object can have several
# executors associated to it (for instance, if a loky worker raises
# an exception, joblib shuts down the executor and instantly
# recreates a new one before raising the error - see
# ``ensure_ready``). Because we don't want two executors tied to
# the same Parallel object (and thus the same context id) to
# register/use/delete the same folder, we also add an id specific
# to the current Manager (and thus specific to its associated
# executor) to the folder name.
new_folder_name = (
"joblib_memmapping_folder_{}_{}_{}".format(
os.getpid(), self._id, context_id)
)
new_folder_path, _ = _get_temp_dir(
new_folder_name, self._temp_folder_root
)
self.register_folder_finalizer(new_folder_path, context_id)
self._cached_temp_folders[context_id] = new_folder_path
def resolve_temp_folder_name(self):
"""Return a folder name specific to the currently activated context"""
return self._cached_temp_folders[self._current_context_id]
def _unregister_context(self, context_id=None):
if context_id is None:
for context_id in list(self._cached_temp_folders):
self._unregister_context(context_id)
else:
temp_folder = self._cached_temp_folders[context_id]
finalizer = self._finalizers[context_id]
resource_tracker.unregister(temp_folder, "folder")
atexit.unregister(finalizer)
self._cached_temp_folders.pop(context_id)
self._finalizers.pop(context_id)
# resource management API
def register_folder_finalizer(self, pool_subfolder, context_id):
# Register the garbage collector at program exit in case the caller
# forgets to call terminate explicitly: note we do not pass any reference to
# ensure that this callback won't prevent garbage collection of
# parallel instance and related file handler resources such as POSIX
# semaphores and pipes
pool_module_name = whichmodule(delete_folder, 'delete_folder')
resource_tracker.register(pool_subfolder, "folder")
def _cleanup():
# In some cases the Python runtime seems to set delete_folder to
# None just before exiting when accessing the delete_folder
# function from the closure namespace. So instead we reimport
# the delete_folder function explicitly.
# https://github.com/joblib/joblib/issues/328
# We cannot just use 'from joblib.pool import delete_folder'
# because joblib should only use relative imports to allow
# easy vendoring.
delete_folder = __import__(
pool_module_name, fromlist=['delete_folder']).delete_folder
try:
delete_folder(pool_subfolder, allow_non_empty=True)
resource_tracker.unregister(pool_subfolder, "folder")
except OSError:
warnings.warn("Failed to delete temporary folder: {}"
.format(pool_subfolder))
self._finalizers[context_id] = atexit.register(_cleanup)
def _unlink_temporary_resources(self, context_id=None):
"""Unlink temporary resources created by a process-based pool"""
if context_id is None:
# iterate over a copy of the cache keys because
# unlink_temporary_resources further deletes an entry in this
# cache
for context_id in self._cached_temp_folders.copy():
self._unlink_temporary_resources(context_id)
else:
temp_folder = self._cached_temp_folders[context_id]
if os.path.exists(temp_folder):
for filename in os.listdir(temp_folder):
resource_tracker.maybe_unlink(
os.path.join(temp_folder, filename), "file"
)
self._try_delete_folder(
allow_non_empty=False, context_id=context_id
)
def _unregister_temporary_resources(self, context_id=None):
"""Unregister temporary resources created by a process-based pool"""
if context_id is None:
for context_id in self._cached_temp_folders:
self._unregister_temporary_resources(context_id)
else:
temp_folder = self._cached_temp_folders[context_id]
if os.path.exists(temp_folder):
for filename in os.listdir(temp_folder):
resource_tracker.unregister(
os.path.join(temp_folder, filename), "file"
)
def _try_delete_folder(self, allow_non_empty, context_id=None):
if context_id is None:
# ditto
for context_id in self._cached_temp_folders.copy():
self._try_delete_folder(
allow_non_empty=allow_non_empty, context_id=context_id
)
else:
temp_folder = self._cached_temp_folders[context_id]
try:
delete_folder(
temp_folder, allow_non_empty=allow_non_empty
)
# Now that this folder is deleted, we can forget about it
self._unregister_context(context_id)
except OSError:
# Temporary folder cannot be deleted right now. No need to
# handle it though, as this folder will be cleaned up by an
# atexit finalizer registered by the memmapping_reducer.
pass

@@ -0,0 +1,52 @@
"""Helper module to factorize the conditional multiprocessing import logic
We use a distinct module to simplify import statements and avoid introducing
circular dependencies (for instance for the assert_spawning name).
"""
import os
import warnings
# Obtain the possible configuration from the environment, assuming 1 (on)
# by default; a value of 0 is mapped to None. This should fail instructively
# if some non-0/1 value is set.
mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
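# For example (illustrative): running ``JOBLIB_MULTIPROCESSING=0 python
# app.py`` makes the expression above evaluate to None, and joblib then
# operates in serial mode.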
if mp:
try:
import multiprocessing as mp
except ImportError:
mp = None
# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if mp is not None:
try:
# try to create a named semaphore using SemLock to make sure they are
# available on this platform. We use the low level object
# _multiprocessing.SemLock to avoid spawning a resource tracker on
# Unix systems or changing the default backend.
import tempfile
from _multiprocessing import SemLock
_rand = tempfile._RandomNameSequence()
for i in range(100):
try:
name = '/joblib-{}-{}'.format(
os.getpid(), next(_rand))
_sem = SemLock(0, 0, 1, name=name, unlink=True)
del _sem # cleanup
break
except FileExistsError as e: # pragma: no cover
if i >= 99:
raise FileExistsError(
'cannot find name for semaphore') from e
except (FileExistsError, AttributeError, ImportError, OSError) as e:
mp = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
# 3rd stage: backward compat for the assert_spawning helper
if mp is not None:
from multiprocessing.context import assert_spawning
else:
assert_spawning = None

@@ -0,0 +1,610 @@
"""
Backends for embarrassingly parallel code.
"""
import gc
import os
import warnings
import threading
import functools
import contextlib
from abc import ABCMeta, abstractmethod
from .my_exceptions import WorkerInterrupt
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmappingPool
from multiprocessing.pool import ThreadPool
from .executor import get_memmapping_executor
# Compat between concurrent.futures and multiprocessing TimeoutError
from multiprocessing import TimeoutError
from concurrent.futures._base import TimeoutError as CfTimeoutError
from .externals.loky import process_executor, cpu_count
class ParallelBackendBase(metaclass=ABCMeta):
"""Helper abc which defines all methods a ParallelBackend must implement"""
supports_timeout = False
supports_inner_max_num_threads = False
nesting_level = None
def __init__(self, nesting_level=None, inner_max_num_threads=None,
**kwargs):
super().__init__(**kwargs)
self.nesting_level = nesting_level
self.inner_max_num_threads = inner_max_num_threads
MAX_NUM_THREADS_VARS = [
'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
'BLIS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS',
'NUMEXPR_NUM_THREADS',
]
TBB_ENABLE_IPC_VAR = "ENABLE_IPC"
@abstractmethod
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs that can actually run in parallel
n_jobs is the number of workers requested by the caller. Passing
n_jobs=-1 means requesting all available workers, for instance matching
the number of CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that
can actually perform work concurrently. The primary use case is to make
it possible for the caller to know in how many chunks to slice the
work.
In general working on larger data chunks is more efficient (less
scheduling overhead and better use of CPU cache prefetching heuristics)
as long as all the workers have enough work to do.
"""
@abstractmethod
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
**backend_args):
"""Reconfigure the backend and return the number of workers.
This makes it possible to reuse an existing backend instance for
successive independent calls to Parallel with different parameters.
"""
self.parallel = parallel
return self.effective_n_jobs(n_jobs)
def start_call(self):
"""Call-back method called at the beginning of a Parallel call"""
def stop_call(self):
"""Call-back method called at the end of a Parallel call"""
def terminate(self):
"""Shutdown the workers and free the shared memory."""
def compute_batch_size(self):
"""Determine the optimal batch size"""
return 1
def batch_completed(self, batch_size, duration):
"""Callback indicate how long it took to run a batch"""
def get_exceptions(self):
"""List of exception types to be captured."""
return []
def abort_everything(self, ensure_ready=True):
"""Abort any running tasks
This is called when an exception has been raised while executing a task;
all the remaining tasks will be ignored and can therefore be
aborted to spare computation resources.
If ensure_ready is True, the backend should be left in an operating
state as future tasks might be re-submitted via that same backend
instance.
If ensure_ready is False, the implementer of this method can decide
to leave the backend in a closed / terminated state as no new tasks
are expected to be submitted to this backend.
Setting ensure_ready to False is an optimization that can be leveraged
when aborting tasks via killing processes from a local process pool
        managed by the backend itself: if we expect no new tasks, there is no
point in re-creating new workers.
"""
# Does nothing by default: to be overridden in subclasses when
# canceling tasks is possible.
pass
def get_nested_backend(self):
"""Backend instance to be used by nested Parallel calls.
By default a thread-based backend is used for the first level of
nesting. Beyond, switch to sequential backend to avoid spawning too
many threads on the host.
"""
nesting_level = getattr(self, 'nesting_level', 0) + 1
if nesting_level > 1:
return SequentialBackend(nesting_level=nesting_level), None
else:
return ThreadingBackend(nesting_level=nesting_level), None
@contextlib.contextmanager
def retrieval_context(self):
"""Context manager to manage an execution context.
Calls to Parallel.retrieve will be made inside this context.
By default, this does nothing. It may be useful for subclasses to
handle nested parallelism. In particular, it may be required to avoid
deadlocks if a backend manages a fixed number of workers, when those
workers may be asked to do nested Parallel calls. Without
'retrieval_context' this could lead to deadlock, as all the workers
managed by the backend may be "busy" waiting for the nested parallel
calls to finish, but the backend has no free workers to execute those
tasks.
"""
yield
def _prepare_worker_env(self, n_jobs):
"""Return environment variables limiting threadpools in external libs.
        This function returns a dict containing environment variables to pass
        when creating a pool of processes. These environment variables limit
        the number of threads to `n_threads` for the OpenMP, MKL, Accelerate
        and OpenBLAS libraries in the child processes.
"""
explicit_n_threads = self.inner_max_num_threads
default_n_threads = str(max(cpu_count() // n_jobs, 1))
# Set the inner environment variables to self.inner_max_num_threads if
# it is given. Else, default to cpu_count // n_jobs unless the variable
# is already present in the parent process environment.
env = {}
for var in self.MAX_NUM_THREADS_VARS:
if explicit_n_threads is None:
var_value = os.environ.get(var, None)
if var_value is None:
var_value = default_n_threads
else:
var_value = str(explicit_n_threads)
env[var] = var_value
if self.TBB_ENABLE_IPC_VAR not in os.environ:
# To avoid over-subscription when using TBB, let the TBB schedulers
# use Inter Process Communication to coordinate:
env[self.TBB_ENABLE_IPC_VAR] = "1"
return env
@staticmethod
def in_main_thread():
return isinstance(threading.current_thread(), threading._MainThread)
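# A minimal sketch (not part of joblib) of a custom backend built on the two
# abstract hooks above. register_parallel_backend and parallel_backend are
# joblib's public helpers; the backend name 'inline_demo' is an arbitrary
# example and ImmediateResult is the helper defined later in this module.
def _demo_custom_backend():
    from joblib import Parallel, delayed
    from joblib import register_parallel_backend, parallel_backend

    class InlineBackend(ParallelBackendBase):
        def effective_n_jobs(self, n_jobs):
            # A single worker: every batch runs inline in the caller.
            return 1

        def apply_async(self, func, callback=None):
            # Evaluate the batch eagerly and hand the result to Parallel.
            result = ImmediateResult(func)
            if callback:
                callback(result)
            return result

    register_parallel_backend('inline_demo', InlineBackend)
    with parallel_backend('inline_demo'):
        return Parallel()(delayed(abs)(-i) for i in range(3))  # [0, 1, 2]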
class SequentialBackend(ParallelBackendBase):
"""A ParallelBackend which will execute all batches sequentially.
Does not use/create any threading objects, and hence has minimal
overhead. Used when n_jobs == 1.
"""
uses_threads = True
supports_sharedmem = True
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
return 1
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
result = ImmediateResult(func)
if callback:
callback(result)
return result
def get_nested_backend(self):
# import is not top level to avoid cyclic import errors.
from .parallel import get_active_backend
        # SequentialBackend should not change the nesting level, the default
        # backend, nor the number of jobs. Just return the current one.
return get_active_backend()
class PoolManagerMixin(object):
"""A helper class for managing pool of workers."""
_pool = None
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
return n_jobs
def terminate(self):
"""Shutdown the process or thread pool"""
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
def _get_pool(self):
"""Used by apply_async to make it possible to implement lazy init"""
return self._pool
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
return self._get_pool().apply_async(
SafeFunction(func), callback=callback)
def abort_everything(self, ensure_ready=True):
"""Shutdown the pool and restart a new one with the same parameters"""
self.terminate()
if ensure_ready:
self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel,
**self.parallel._backend_args)
class AutoBatchingMixin(object):
"""A helper class for automagically batching jobs."""
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
    # This setting was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Batching counters default values
_DEFAULT_EFFECTIVE_BATCH_SIZE = 1
_DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
def compute_batch_size(self):
"""Determine the optimal batch size"""
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < self.MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
            # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(old_batch_size *
self.MIN_IDEAL_BATCH_DURATION /
batch_duration)
            # Multiply by two to limit oscillations between min and max.
            ideal_batch_size *= 2
            # Don't increase the batch size too fast, to avoid huge batch
            # sizes potentially leading to starving workers.
batch_size = min(2 * old_batch_size, ideal_batch_size)
batch_size = max(batch_size, 1)
self._effective_batch_size = batch_size
if self.parallel.verbose >= 10:
self.parallel._print(
"Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (batch_duration, batch_size))
elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
            # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
# decrease the batch size quickly to limit potential starving
ideal_batch_size = int(
old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
)
            # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.parallel.verbose >= 10:
self.parallel._print(
"Batch computation too slow (%.4fs.) "
"Setting batch_size=%d.", (batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = \
self._DEFAULT_SMOOTHED_BATCH_DURATION
return batch_size
def batch_completed(self, batch_size, duration):
"""Callback indicate how long it took to run a batch"""
if batch_size == self._effective_batch_size:
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self._smoothed_batch_duration
if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
# First record of duration for this batch size after the last
# reset.
new_duration = duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * duration
self._smoothed_batch_duration = new_duration
def reset_batch_stats(self):
"""Reset batch statistics to default values.
        This avoids interference with future jobs.
"""
self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
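# Worked sketch of the fast-branch arithmetic above, using the class default
# MIN_IDEAL_BATCH_DURATION = 0.2 and the simplifying (hypothetical) assumption
# that the smoothed batch duration stays constant at 0.01s: the doubling cap
# makes the size ramp up gradually instead of jumping straight to the ideal
# value int(batch_size * 0.2 / duration) * 2.
def _demo_batch_growth(duration=0.01, batch_size=1, steps=5):
    sizes = []
    for _ in range(steps):
        ideal_batch_size = int(batch_size * 0.2 / duration) * 2
        batch_size = max(min(2 * batch_size, ideal_batch_size), 1)
        sizes.append(batch_size)
    return sizes  # [2, 4, 8, 16, 32] under the constant-duration assumption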
class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
"""A ParallelBackend which will use a thread pool to execute batches in.
This is a low-overhead backend but it suffers from the Python Global
Interpreter Lock if the called function relies a lot on Python objects.
Mostly useful when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped in a "with
nogil" block or an expensive call to a library such as NumPy).
    The actual thread pool is lazily initialized: its construction is
    delayed to the first call to apply_async.
ThreadingBackend is used as the default backend for nested calls.
"""
supports_timeout = True
uses_threads = True
supports_sharedmem = True
def configure(self, n_jobs=1, parallel=None, **backend_args):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
# Avoid unnecessary overhead and use sequential backend instead.
raise FallbackToBackend(
SequentialBackend(nesting_level=self.nesting_level))
self.parallel = parallel
self._n_jobs = n_jobs
return n_jobs
def _get_pool(self):
"""Lazily initialize the thread pool
The actual pool of worker threads is only initialized at the first
call to apply_async.
"""
if self._pool is None:
self._pool = ThreadPool(self._n_jobs)
return self._pool
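# Usage sketch: prefer='threads' is the public hint that selects this backend;
# it pays off when the inner call releases the GIL (NumPy, I/O, Cython).
def _demo_threading_backend():
    from joblib import Parallel, delayed
    from math import sqrt
    return Parallel(n_jobs=2, prefer='threads')(
        delayed(sqrt)(i ** 2) for i in range(4))  # [0.0, 1.0, 2.0, 3.0]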
class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin,
ParallelBackendBase):
"""A ParallelBackend which will use a multiprocessing.Pool.
Will introduce some communication and memory overhead when exchanging
    input and output data with the worker Python processes.
However, does not suffer from the Python Global Interpreter Lock.
"""
supports_timeout = True
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel.
This also checks if we are attempting to create a nested parallel
loop.
"""
if mp is None:
return 1
if mp.current_process().daemon:
# Daemonic processes cannot have children
if n_jobs != 1:
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
if process_executor._CURRENT_DEPTH > 0:
# Mixing loky and multiprocessing in nested loop is not supported
if n_jobs != 1:
warnings.warn(
                    'Multiprocessing-backed parallel loops cannot be nested'
                    ' below loky, setting n_jobs=1',
stacklevel=3)
return 1
elif not (self.in_main_thread() or self.nesting_level == 0):
# Prevent posix fork inside in non-main posix threads
if n_jobs != 1:
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
**memmappingpool_args):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
raise FallbackToBackend(
SequentialBackend(nesting_level=self.nesting_level))
# Make sure to free as much memory as possible before forking
gc.collect()
self._pool = MemmappingPool(n_jobs, **memmappingpool_args)
self.parallel = parallel
return n_jobs
def terminate(self):
"""Shutdown the process or thread pool"""
super(MultiprocessingBackend, self).terminate()
self.reset_batch_stats()
class LokyBackend(AutoBatchingMixin, ParallelBackendBase):
"""Managing pool of workers with loky instead of multiprocessing."""
supports_timeout = True
supports_inner_max_num_threads = True
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
idle_worker_timeout=300, **memmappingexecutor_args):
"""Build a process executor and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
raise FallbackToBackend(
SequentialBackend(nesting_level=self.nesting_level))
self._workers = get_memmapping_executor(
n_jobs, timeout=idle_worker_timeout,
env=self._prepare_worker_env(n_jobs=n_jobs),
context_id=parallel._id, **memmappingexecutor_args)
self.parallel = parallel
return n_jobs
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif mp.current_process().daemon:
# Daemonic processes cannot have children
if n_jobs != 1:
warnings.warn(
                    'Loky-backed parallel loops cannot be called in a'
                    ' multiprocessing context, setting n_jobs=1',
stacklevel=3)
return 1
elif not (self.in_main_thread() or self.nesting_level == 0):
# Prevent posix fork inside in non-main posix threads
if n_jobs != 1:
warnings.warn(
'Loky-backed parallel loops cannot be nested below '
'threads, setting n_jobs=1',
stacklevel=3)
return 1
elif n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
return n_jobs
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
future = self._workers.submit(SafeFunction(func))
future.get = functools.partial(self.wrap_future_result, future)
if callback is not None:
future.add_done_callback(callback)
return future
@staticmethod
def wrap_future_result(future, timeout=None):
"""Wrapper for Future.result to implement the same behaviour as
AsyncResults.get from multiprocessing."""
try:
return future.result(timeout=timeout)
except CfTimeoutError as e:
raise TimeoutError from e
def terminate(self):
if self._workers is not None:
# Don't terminate the workers as we want to reuse them in later
# calls, but cleanup the temporary resources that the Parallel call
# created. This 'hack' requires a private, low-level operation.
self._workers._temp_folder_manager._unlink_temporary_resources(
context_id=self.parallel._id
)
self._workers = None
self.reset_batch_stats()
def abort_everything(self, ensure_ready=True):
"""Shutdown the workers and restart a new one with the same parameters
"""
self._workers.terminate(kill_workers=True)
self._workers = None
if ensure_ready:
self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
class ImmediateResult(object):
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
class SafeFunction(object):
"""Wrapper that handles the serialization of exception tracebacks.
TODO python2_drop: check whether SafeFunction is still needed since we
dropped support for Python 2. If not needed anymore it should be
deprecated.
If an exception is triggered when calling the inner function, a copy of
the full traceback is captured to make it possible to serialize
it so that it can be rendered in a different Python process.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt as e:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt() from e
except BaseException:
# Rely on Python 3 built-in Remote Traceback reporting
raise
class FallbackToBackend(Exception):
"""Raised when configuration should fallback to another backend"""
def __init__(self, backend):
self.backend = backend
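# Sketch of the fallback protocol: a backend's configure() raising
# FallbackToBackend is how it hands control back to Parallel, which then
# retries with the backend instance carried by the exception.
def _demo_fallback():
    backend = ThreadingBackend(nesting_level=0)
    try:
        backend.configure(n_jobs=1)
    except FallbackToBackend as e:
        return type(e.backend).__name__  # 'SequentialBackend'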

View file

@@ -0,0 +1,414 @@
"""Storage providers backends for Memory caching."""
import re
import os
import os.path
import datetime
import json
import shutil
import warnings
import collections
import operator
import threading
from abc import ABCMeta, abstractmethod
from .backports import concurrency_safe_rename
from .disk import mkdirp, memstr_to_bytes, rm_subdirs
from . import numpy_pickle
CacheItemInfo = collections.namedtuple('CacheItemInfo',
'path size last_access')
def concurrency_safe_write(object_to_write, filename, write_func):
"""Writes an object into a unique file in a concurrency-safe way."""
thread_id = id(threading.current_thread())
temporary_filename = '{}.thread-{}-pid-{}'.format(
filename, thread_id, os.getpid())
write_func(object_to_write, temporary_filename)
return temporary_filename
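# Usage sketch (hypothetical payload and path): write to the uniquely named
# temporary file, then atomically publish it with concurrency_safe_rename,
# mirroring what StoreBackendMixin._concurrency_safe_write does below.
def _demo_concurrency_safe_write(obj=b'payload', filename='/tmp/demo.out'):
    def write_func(to_write, dest):
        with open(dest, 'wb') as f:
            f.write(to_write)
    temporary_filename = concurrency_safe_write(obj, filename, write_func)
    concurrency_safe_rename(temporary_filename, filename)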
class StoreBackendBase(metaclass=ABCMeta):
"""Helper Abstract Base Class which defines all methods that
a StorageBackend must implement."""
location = None
@abstractmethod
def _open_item(self, f, mode):
"""Opens an item on the store and return a file-like object.
This method is private and only used by the StoreBackendMixin object.
Parameters
----------
f: a file-like object
The file-like object where an item is stored and retrieved
mode: string, optional
            the mode in which the file-like object is opened; allowed values
            are 'rb' and 'wb'
Returns
-------
a file-like object
"""
@abstractmethod
def _item_exists(self, location):
"""Checks if an item location exists in the store.
This method is private and only used by the StoreBackendMixin object.
Parameters
----------
location: string
The location of an item. On a filesystem, this corresponds to the
absolute path, including the filename, of a file.
Returns
-------
True if the item exists, False otherwise
"""
@abstractmethod
def _move_item(self, src, dst):
"""Moves an item from src to dst in the store.
This method is private and only used by the StoreBackendMixin object.
Parameters
----------
src: string
The source location of an item
dst: string
The destination location of an item
"""
@abstractmethod
def create_location(self, location):
"""Creates a location on the store.
Parameters
----------
location: string
The location in the store. On a filesystem, this corresponds to a
directory.
"""
@abstractmethod
def clear_location(self, location):
"""Clears a location on the store.
Parameters
----------
location: string
The location in the store. On a filesystem, this corresponds to a
directory or a filename absolute path
"""
@abstractmethod
def get_items(self):
"""Returns the whole list of items available in the store.
Returns
-------
        The list of items identified by their ids (e.g. filename in a
        filesystem).
"""
@abstractmethod
def configure(self, location, verbose=0, backend_options=dict()):
"""Configures the store.
Parameters
----------
location: string
The base location used by the store. On a filesystem, this
corresponds to a directory.
verbose: int
The level of verbosity of the store
backend_options: dict
            Contains a dictionary of named parameters used to configure the
            store backend.
"""
class StoreBackendMixin(object):
"""Class providing all logic for managing the store in a generic way.
The StoreBackend subclass has to implement 3 methods: create_location,
clear_location and configure. The StoreBackend also has to provide
    private _open_item, _item_exists and _move_item methods. The _open_item
method has to have the same signature as the builtin open and return a
file-like object.
"""
def load_item(self, path, verbose=1, msg=None):
"""Load an item from the store given its path as a list of
strings."""
full_path = os.path.join(self.location, *path)
if verbose > 1:
if verbose < 10:
print('{0}...'.format(msg))
else:
print('{0} from {1}'.format(msg, full_path))
mmap_mode = (None if not hasattr(self, 'mmap_mode')
else self.mmap_mode)
filename = os.path.join(full_path, 'output.pkl')
if not self._item_exists(filename):
raise KeyError("Non-existing item (may have been "
"cleared).\nFile %s does not exist" % filename)
# file-like object cannot be used when mmap_mode is set
if mmap_mode is None:
with self._open_item(filename, "rb") as f:
item = numpy_pickle.load(f)
else:
item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
return item
def dump_item(self, path, item, verbose=1):
"""Dump an item in the store at the path given as a list of
strings."""
try:
item_path = os.path.join(self.location, *path)
if not self._item_exists(item_path):
self.create_location(item_path)
filename = os.path.join(item_path, 'output.pkl')
if verbose > 10:
print('Persisting in %s' % item_path)
def write_func(to_write, dest_filename):
with self._open_item(dest_filename, "wb") as f:
numpy_pickle.dump(to_write, f,
compress=self.compress)
self._concurrency_safe_write(item, filename, write_func)
        except:  # noqa: E722
            # Race condition in the creation of the directory: another
            # process may have stored the item concurrently, so it is safe
            # to ignore the error.
            pass
def clear_item(self, path):
"""Clear the item at the path, given as a list of strings."""
item_path = os.path.join(self.location, *path)
if self._item_exists(item_path):
self.clear_location(item_path)
def contains_item(self, path):
"""Check if there is an item at the path, given as a list of
strings"""
item_path = os.path.join(self.location, *path)
filename = os.path.join(item_path, 'output.pkl')
return self._item_exists(filename)
def get_item_info(self, path):
"""Return information about item."""
return {'location': os.path.join(self.location,
*path)}
def get_metadata(self, path):
"""Return actual metadata of an item."""
try:
item_path = os.path.join(self.location, *path)
filename = os.path.join(item_path, 'metadata.json')
with self._open_item(filename, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
except: # noqa: E722
return {}
def store_metadata(self, path, metadata):
"""Store metadata of a computation."""
try:
item_path = os.path.join(self.location, *path)
self.create_location(item_path)
filename = os.path.join(item_path, 'metadata.json')
def write_func(to_write, dest_filename):
with self._open_item(dest_filename, "wb") as f:
f.write(json.dumps(to_write).encode('utf-8'))
self._concurrency_safe_write(metadata, filename, write_func)
except: # noqa: E722
pass
def contains_path(self, path):
"""Check cached function is available in store."""
func_path = os.path.join(self.location, *path)
return self.object_exists(func_path)
def clear_path(self, path):
"""Clear all items with a common path in the store."""
func_path = os.path.join(self.location, *path)
if self._item_exists(func_path):
self.clear_location(func_path)
def store_cached_func_code(self, path, func_code=None):
"""Store the code of the cached function."""
func_path = os.path.join(self.location, *path)
if not self._item_exists(func_path):
self.create_location(func_path)
if func_code is not None:
filename = os.path.join(func_path, "func_code.py")
with self._open_item(filename, 'wb') as f:
f.write(func_code.encode('utf-8'))
def get_cached_func_code(self, path):
"""Store the code of the cached function."""
path += ['func_code.py', ]
filename = os.path.join(self.location, *path)
try:
with self._open_item(filename, 'rb') as f:
return f.read().decode('utf-8')
except: # noqa: E722
raise
def get_cached_func_info(self, path):
"""Return information related to the cached function if it exists."""
return {'location': os.path.join(self.location, *path)}
def clear(self):
"""Clear the whole store content."""
self.clear_location(self.location)
def reduce_store_size(self, bytes_limit):
"""Reduce store size to keep it under the given bytes limit."""
items_to_delete = self._get_items_to_delete(bytes_limit)
for item in items_to_delete:
if self.verbose > 10:
print('Deleting item {0}'.format(item))
try:
self.clear_location(item.path)
except OSError:
# Even with ignore_errors=True shutil.rmtree can raise OSError
# with:
# [Errno 116] Stale file handle if another process has deleted
# the folder already.
pass
def _get_items_to_delete(self, bytes_limit):
"""Get items to delete to keep the store under a size limit."""
if isinstance(bytes_limit, str):
bytes_limit = memstr_to_bytes(bytes_limit)
items = self.get_items()
size = sum(item.size for item in items)
to_delete_size = size - bytes_limit
if to_delete_size < 0:
return []
# We want to delete first the cache items that were accessed a
# long time ago
items.sort(key=operator.attrgetter('last_access'))
items_to_delete = []
size_so_far = 0
for item in items:
if size_so_far > to_delete_size:
break
items_to_delete.append(item)
size_so_far += item.size
return items_to_delete
def _concurrency_safe_write(self, to_write, filename, write_func):
"""Writes an object into a file in a concurrency-safe way."""
temporary_filename = concurrency_safe_write(to_write,
filename, write_func)
self._move_item(temporary_filename, filename)
def __repr__(self):
"""Printable representation of the store location."""
return '{class_name}(location="{location}")'.format(
class_name=self.__class__.__name__, location=self.location)
class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
"""A StoreBackend used with local or network file systems."""
_open_item = staticmethod(open)
_item_exists = staticmethod(os.path.exists)
_move_item = staticmethod(concurrency_safe_rename)
def clear_location(self, location):
"""Delete location on store."""
if (location == self.location):
rm_subdirs(location)
else:
shutil.rmtree(location, ignore_errors=True)
def create_location(self, location):
"""Create object location on store"""
mkdirp(location)
def get_items(self):
"""Returns the whole list of items available in the store."""
items = []
for dirpath, _, filenames in os.walk(self.location):
is_cache_hash_dir = re.match('[a-f0-9]{32}',
os.path.basename(dirpath))
if is_cache_hash_dir:
output_filename = os.path.join(dirpath, 'output.pkl')
try:
last_access = os.path.getatime(output_filename)
except OSError:
try:
last_access = os.path.getatime(dirpath)
except OSError:
# The directory has already been deleted
continue
last_access = datetime.datetime.fromtimestamp(last_access)
try:
full_filenames = [os.path.join(dirpath, fn)
for fn in filenames]
dirsize = sum(os.path.getsize(fn)
for fn in full_filenames)
except OSError:
# Either output_filename or one of the files in
# dirpath does not exist any more. We assume this
# directory is being cleaned by another process already
continue
items.append(CacheItemInfo(dirpath, dirsize,
last_access))
return items
def configure(self, location, verbose=1, backend_options=None):
"""Configure the store backend.
For this backend, valid store options are 'compress' and 'mmap_mode'
"""
if backend_options is None:
backend_options = {}
# setup location directory
self.location = location
if not os.path.exists(self.location):
mkdirp(self.location)
# item can be stored compressed for faster I/O
self.compress = backend_options.get('compress', False)
# FileSystemStoreBackend can be used with mmap_mode options under
# certain conditions.
mmap_mode = backend_options.get('mmap_mode')
if self.compress and mmap_mode is not None:
warnings.warn('Compressed items cannot be memmapped in a '
'filesystem store. Option will be ignored.',
stacklevel=2)
self.mmap_mode = mmap_mode
self.verbose = verbose
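# Usage sketch (hypothetical location): driving FileSystemStoreBackend
# directly through the mixin helpers defined above.
def _demo_store_backend(location='/tmp/joblib_store_demo'):
    store = FileSystemStoreBackend()
    store.configure(location, verbose=0, backend_options={'compress': False})
    store.dump_item(['func', 'abc123'], {'answer': 42}, verbose=0)
    assert store.contains_item(['func', 'abc123'])
    return store.load_item(['func', 'abc123'], verbose=0)  # {'answer': 42}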

View file

@@ -0,0 +1,78 @@
"""
Backports of fixes for joblib dependencies
"""
import os
import time
from distutils.version import LooseVersion
from os.path import basename
from multiprocessing import util
try:
import numpy as np
def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
shape=None, order='C', unlink_on_gc_collect=False):
"""Custom memmap constructor compatible with numpy.memmap.
This function:
        - is a backport of the numpy memmap offset fix (see
https://github.com/numpy/numpy/pull/8443 for more details.
The numpy fix is available starting numpy 1.13)
- adds ``unlink_on_gc_collect``, which specifies explicitly whether
the process re-constructing the memmap owns a reference to the
underlying file. If set to True, it adds a finalizer to the
newly-created memmap that sends a maybe_unlink request for the
          memmapped file to the resource_tracker.
"""
util.debug(
"[MEMMAP READ] creating a memmap (shape {}, filename {}, "
"pid {})".format(shape, basename(filename), os.getpid())
)
mm = np.memmap(filename, dtype=dtype, mode=mode, offset=offset,
shape=shape, order=order)
if LooseVersion(np.__version__) < '1.13':
mm.offset = offset
if unlink_on_gc_collect:
from ._memmapping_reducer import add_maybe_unlink_finalizer
add_maybe_unlink_finalizer(mm)
return mm
except ImportError:
def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
shape=None, order='C', unlink_on_gc_collect=False):
raise NotImplementedError(
"'joblib.backports.make_memmap' should not be used "
'if numpy is not installed.')
if os.name == 'nt':
# https://github.com/joblib/joblib/issues/540
access_denied_errors = (5, 13)
from os import replace
def concurrency_safe_rename(src, dst):
"""Renames ``src`` into ``dst`` overwriting ``dst`` if it exists.
On Windows os.replace can yield permission errors if executed by two
different processes.
"""
max_sleep_time = 1
total_sleep_time = 0
sleep_time = 0.001
while total_sleep_time < max_sleep_time:
try:
replace(src, dst)
break
except Exception as exc:
if getattr(exc, 'winerror', None) in access_denied_errors:
time.sleep(sleep_time)
total_sleep_time += sleep_time
sleep_time *= 2
else:
raise
else:
raise
else:
from os import replace as concurrency_safe_rename # noqa
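# Usage sketch (hypothetical paths): on both branches above,
# concurrency_safe_rename atomically replaces dst with src, retrying the
# transient access-denied errors that can occur on Windows.
def _demo_safe_rename(src='/tmp/demo.tmp', dst='/tmp/demo.bin'):
    with open(src, 'wb') as f:
        f.write(b'data')
    concurrency_safe_rename(src, dst)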

View file

@@ -0,0 +1,570 @@
"""Classes and functions for managing compressors."""
import io
import zlib
from distutils.version import LooseVersion
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
try:
import bz2
except ImportError:
bz2 = None
try:
import lz4
from lz4.frame import LZ4FrameFile
except ImportError:
lz4 = None
try:
import lzma
except ImportError:
lzma = None
LZ4_NOT_INSTALLED_ERROR = ('LZ4 is not installed. Install it with pip: '
'https://python-lz4.readthedocs.io/')
# Registered compressors
_COMPRESSORS = {}
# Magic numbers of supported compression file formats.
_ZFILE_PREFIX = b'ZF' # used with pickle files created before 0.9.3.
_ZLIB_PREFIX = b'\x78'
_GZIP_PREFIX = b'\x1f\x8b'
_BZ2_PREFIX = b'BZ'
_XZ_PREFIX = b'\xfd\x37\x7a\x58\x5a'
_LZMA_PREFIX = b'\x5d\x00'
_LZ4_PREFIX = b'\x04\x22\x4D\x18'
def register_compressor(compressor_name, compressor,
force=False):
"""Register a new compressor.
Parameters
    ----------
compressor_name: str.
The name of the compressor.
compressor: CompressorWrapper
An instance of a 'CompressorWrapper'.
"""
global _COMPRESSORS
if not isinstance(compressor_name, str):
raise ValueError("Compressor name should be a string, "
"'{}' given.".format(compressor_name))
if not isinstance(compressor, CompressorWrapper):
raise ValueError("Compressor should implement the CompressorWrapper "
"interface, '{}' given.".format(compressor))
if (compressor.fileobj_factory is not None and
(not hasattr(compressor.fileobj_factory, 'read') or
not hasattr(compressor.fileobj_factory, 'write') or
not hasattr(compressor.fileobj_factory, 'seek') or
not hasattr(compressor.fileobj_factory, 'tell'))):
raise ValueError("Compressor 'fileobj_factory' attribute should "
"implement the file object interface, '{}' given."
.format(compressor.fileobj_factory))
if compressor_name in _COMPRESSORS and not force:
raise ValueError("Compressor '{}' already registered."
.format(compressor_name))
_COMPRESSORS[compressor_name] = compressor
class CompressorWrapper():
"""A wrapper around a compressor file object.
Attributes
----------
obj: a file-like object
The object must implement the buffer interface and will be used
internally to compress/decompress the data.
prefix: bytestring
A bytestring corresponding to the magic number that identifies the
file format associated to the compressor.
    extension: str
The file extension used to automatically select this compressor during
a dump to a file.
"""
def __init__(self, obj, prefix=b'', extension=''):
self.fileobj_factory = obj
self.prefix = prefix
self.extension = extension
def compressor_file(self, fileobj, compresslevel=None):
"""Returns an instance of a compressor file object."""
if compresslevel is None:
return self.fileobj_factory(fileobj, 'wb')
else:
return self.fileobj_factory(fileobj, 'wb',
compresslevel=compresslevel)
def decompressor_file(self, fileobj):
"""Returns an instance of a decompressor file object."""
return self.fileobj_factory(fileobj, 'rb')
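# Sketch (using only the standard library's gzip module): registering an
# additional compressor name that reuses an existing file-object factory.
# The name 'gzip-alias' is an arbitrary example.
def _demo_register_alias():
    import gzip
    register_compressor('gzip-alias', CompressorWrapper(
        obj=gzip.GzipFile, prefix=_GZIP_PREFIX, extension='.gz'))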
class BZ2CompressorWrapper(CompressorWrapper):
prefix = _BZ2_PREFIX
extension = '.bz2'
def __init__(self):
if bz2 is not None:
self.fileobj_factory = bz2.BZ2File
else:
self.fileobj_factory = None
def _check_versions(self):
if bz2 is None:
            raise ValueError('bz2 module is not compiled into your Python '
                             'standard library.')
def compressor_file(self, fileobj, compresslevel=None):
"""Returns an instance of a compressor file object."""
self._check_versions()
if compresslevel is None:
return self.fileobj_factory(fileobj, 'wb')
else:
return self.fileobj_factory(fileobj, 'wb',
compresslevel=compresslevel)
def decompressor_file(self, fileobj):
"""Returns an instance of a decompressor file object."""
self._check_versions()
fileobj = self.fileobj_factory(fileobj, 'rb')
return fileobj
class LZMACompressorWrapper(CompressorWrapper):
prefix = _LZMA_PREFIX
extension = '.lzma'
_lzma_format_name = 'FORMAT_ALONE'
def __init__(self):
if lzma is not None:
self.fileobj_factory = lzma.LZMAFile
self._lzma_format = getattr(lzma, self._lzma_format_name)
else:
self.fileobj_factory = None
def _check_versions(self):
if lzma is None:
            raise ValueError('lzma module is not compiled into your Python '
                             'standard library.')
def compressor_file(self, fileobj, compresslevel=None):
"""Returns an instance of a compressor file object."""
if compresslevel is None:
return self.fileobj_factory(fileobj, 'wb',
format=self._lzma_format)
else:
return self.fileobj_factory(fileobj, 'wb',
format=self._lzma_format,
preset=compresslevel)
def decompressor_file(self, fileobj):
"""Returns an instance of a decompressor file object."""
return lzma.LZMAFile(fileobj, 'rb')
class XZCompressorWrapper(LZMACompressorWrapper):
prefix = _XZ_PREFIX
extension = '.xz'
_lzma_format_name = 'FORMAT_XZ'
class LZ4CompressorWrapper(CompressorWrapper):
prefix = _LZ4_PREFIX
extension = '.lz4'
def __init__(self):
if lz4 is not None:
self.fileobj_factory = LZ4FrameFile
else:
self.fileobj_factory = None
def _check_versions(self):
if lz4 is None:
raise ValueError(LZ4_NOT_INSTALLED_ERROR)
lz4_version = lz4.__version__
if lz4_version.startswith("v"):
lz4_version = lz4_version[1:]
if LooseVersion(lz4_version) < LooseVersion('0.19'):
raise ValueError(LZ4_NOT_INSTALLED_ERROR)
def compressor_file(self, fileobj, compresslevel=None):
"""Returns an instance of a compressor file object."""
self._check_versions()
if compresslevel is None:
return self.fileobj_factory(fileobj, 'wb')
else:
return self.fileobj_factory(fileobj, 'wb',
compression_level=compresslevel)
def decompressor_file(self, fileobj):
"""Returns an instance of a decompressor file object."""
self._check_versions()
return self.fileobj_factory(fileobj, 'rb')
###############################################################################
# base file compression/decompression object definition
_MODE_CLOSED = 0
_MODE_READ = 1
_MODE_READ_EOF = 2
_MODE_WRITE = 3
_BUFFER_SIZE = 8192
class BinaryZlibFile(io.BufferedIOBase):
"""A file object providing transparent zlib (de)compression.
    TODO python2_drop: is it still needed since we dropped Python 2 support?
    A BinaryZlibFile can act as a wrapper for an existing file object, or
    refer directly to a named file on disk.
Note that BinaryZlibFile provides only a *binary* file interface: data read
is returned as bytes, and data to be written should be given as bytes.
This object is an adaptation of the BZ2File object and is compatible with
versions of python >= 2.7.
If filename is a str or bytes object, it gives the name
of the file to be opened. Otherwise, it should be a file object,
which will be used to read or write the compressed data.
mode can be 'rb' for reading (default) or 'wb' for (over)writing
If mode is 'wb', compresslevel can be a number between 1
and 9 specifying the level of compression: 1 produces the least
compression, and 9 produces the most compression. 3 is the default.
"""
wbits = zlib.MAX_WBITS
def __init__(self, filename, mode="rb", compresslevel=3):
# This lock must be recursive, so that BufferedIOBase's
# readline(), readlines() and writelines() don't deadlock.
self._lock = RLock()
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._pos = 0
self._size = -1
self.compresslevel = compresslevel
if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):
raise ValueError("'compresslevel' must be an integer "
"between 1 and 9. You provided 'compresslevel={}'"
.format(compresslevel))
if mode == "rb":
self._mode = _MODE_READ
self._decompressor = zlib.decompressobj(self.wbits)
self._buffer = b""
self._buffer_offset = 0
elif mode == "wb":
self._mode = _MODE_WRITE
self._compressor = zlib.compressobj(self.compresslevel,
zlib.DEFLATED, self.wbits,
zlib.DEF_MEM_LEVEL, 0)
else:
raise ValueError("Invalid mode: %r" % (mode,))
if isinstance(filename, str):
self._fp = io.open(filename, mode)
self._closefp = True
elif hasattr(filename, "read") or hasattr(filename, "write"):
self._fp = filename
else:
raise TypeError("filename must be a str or bytes object, "
"or a file")
def close(self):
"""Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.
"""
with self._lock:
if self._mode == _MODE_CLOSED:
return
try:
if self._mode in (_MODE_READ, _MODE_READ_EOF):
self._decompressor = None
elif self._mode == _MODE_WRITE:
self._fp.write(self._compressor.flush())
self._compressor = None
finally:
try:
if self._closefp:
self._fp.close()
finally:
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._buffer = b""
self._buffer_offset = 0
@property
def closed(self):
"""True if this file is closed."""
return self._mode == _MODE_CLOSED
def fileno(self):
"""Return the file descriptor for the underlying file."""
self._check_not_closed()
return self._fp.fileno()
def seekable(self):
"""Return whether the file supports seeking."""
return self.readable() and self._fp.seekable()
def readable(self):
"""Return whether the file was opened for reading."""
self._check_not_closed()
return self._mode in (_MODE_READ, _MODE_READ_EOF)
def writable(self):
"""Return whether the file was opened for writing."""
self._check_not_closed()
return self._mode == _MODE_WRITE
# Mode-checking helper functions.
def _check_not_closed(self):
if self.closed:
fname = getattr(self._fp, 'name', None)
msg = "I/O operation on closed file"
if fname is not None:
msg += " {}".format(fname)
msg += "."
raise ValueError(msg)
def _check_can_read(self):
if self._mode not in (_MODE_READ, _MODE_READ_EOF):
self._check_not_closed()
raise io.UnsupportedOperation("File not open for reading")
def _check_can_write(self):
if self._mode != _MODE_WRITE:
self._check_not_closed()
raise io.UnsupportedOperation("File not open for writing")
def _check_can_seek(self):
if self._mode not in (_MODE_READ, _MODE_READ_EOF):
self._check_not_closed()
raise io.UnsupportedOperation("Seeking is only supported "
"on files open for reading")
if not self._fp.seekable():
raise io.UnsupportedOperation("The underlying file object "
"does not support seeking")
# Fill the readahead buffer if it is empty. Returns False on EOF.
def _fill_buffer(self):
if self._mode == _MODE_READ_EOF:
return False
# Depending on the input data, our call to the decompressor may not
# return any data. In this case, try again after reading another block.
while self._buffer_offset == len(self._buffer):
try:
rawblock = (self._decompressor.unused_data or
self._fp.read(_BUFFER_SIZE))
if not rawblock:
raise EOFError
except EOFError:
# End-of-stream marker and end of file. We're good.
self._mode = _MODE_READ_EOF
self._size = self._pos
return False
else:
self._buffer = self._decompressor.decompress(rawblock)
self._buffer_offset = 0
return True
# Read data until EOF.
# If return_data is false, consume the data without returning it.
def _read_all(self, return_data=True):
# The loop assumes that _buffer_offset is 0. Ensure that this is true.
self._buffer = self._buffer[self._buffer_offset:]
self._buffer_offset = 0
blocks = []
while self._fill_buffer():
if return_data:
blocks.append(self._buffer)
self._pos += len(self._buffer)
self._buffer = b""
if return_data:
return b"".join(blocks)
# Read a block of up to n bytes.
# If return_data is false, consume the data without returning it.
def _read_block(self, n_bytes, return_data=True):
# If we have enough data buffered, return immediately.
end = self._buffer_offset + n_bytes
if end <= len(self._buffer):
data = self._buffer[self._buffer_offset: end]
self._buffer_offset = end
self._pos += len(data)
return data if return_data else None
# The loop assumes that _buffer_offset is 0. Ensure that this is true.
self._buffer = self._buffer[self._buffer_offset:]
self._buffer_offset = 0
blocks = []
while n_bytes > 0 and self._fill_buffer():
if n_bytes < len(self._buffer):
data = self._buffer[:n_bytes]
self._buffer_offset = n_bytes
else:
data = self._buffer
self._buffer = b""
if return_data:
blocks.append(data)
self._pos += len(data)
n_bytes -= len(data)
if return_data:
return b"".join(blocks)
def read(self, size=-1):
"""Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b'' if the file is already at EOF.
"""
with self._lock:
self._check_can_read()
if size == 0:
return b""
elif size < 0:
return self._read_all()
else:
return self._read_block(size)
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns the number of bytes read (0 for EOF).
"""
with self._lock:
return io.BufferedIOBase.readinto(self, b)
def write(self, data):
"""Write a byte string to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
"""
with self._lock:
self._check_can_write()
# Convert data type if called by io.BufferedWriter.
if isinstance(data, memoryview):
data = data.tobytes()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
# Rewind the file to the beginning of the data stream.
def _rewind(self):
self._fp.seek(0, 0)
self._mode = _MODE_READ
self._pos = 0
self._decompressor = zlib.decompressobj(self.wbits)
self._buffer = b""
self._buffer_offset = 0
def seek(self, offset, whence=0):
"""Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Values for whence are:
0: start of stream (default); offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
"""
with self._lock:
self._check_can_seek()
# Recalculate offset as an absolute file position.
if whence == 0:
pass
elif whence == 1:
offset = self._pos + offset
elif whence == 2:
# Seeking relative to EOF - we need to know the file's size.
if self._size < 0:
self._read_all(return_data=False)
offset = self._size + offset
else:
raise ValueError("Invalid value for whence: %s" % (whence,))
# Make it so that offset is the number of bytes to skip forward.
if offset < self._pos:
self._rewind()
else:
offset -= self._pos
# Read and discard data until we reach the desired position.
self._read_block(offset, return_data=False)
return self._pos
def tell(self):
"""Return the current file position."""
with self._lock:
self._check_not_closed()
return self._pos
class ZlibCompressorWrapper(CompressorWrapper):
def __init__(self):
CompressorWrapper.__init__(self, obj=BinaryZlibFile,
prefix=_ZLIB_PREFIX, extension='.z')
class BinaryGzipFile(BinaryZlibFile):
"""A file object providing transparent gzip (de)compression.
If filename is a str or bytes object, it gives the name
of the file to be opened. Otherwise, it should be a file object,
which will be used to read or write the compressed data.
mode can be 'rb' for reading (default) or 'wb' for (over)writing
If mode is 'wb', compresslevel can be a number between 1
and 9 specifying the level of compression: 1 produces the least
compression, and 9 produces the most compression. 3 is the default.
"""
wbits = 31 # zlib compressor/decompressor wbits value for gzip format.
class GzipCompressorWrapper(CompressorWrapper):
def __init__(self):
CompressorWrapper.__init__(self, obj=BinaryGzipFile,
prefix=_GZIP_PREFIX, extension='.gz')
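# Round-trip sketch for BinaryZlibFile (hypothetical path): bytes written
# through the wrapper come back identical after zlib (de)compression.
def _demo_zlib_roundtrip(path='/tmp/demo.z'):
    with BinaryZlibFile(path, 'wb', compresslevel=3) as f:
        f.write(b'joblib' * 10)
    with BinaryZlibFile(path, 'rb') as f:
        return f.read() == b'joblib' * 10  # True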

View file

@@ -0,0 +1,136 @@
"""
Disk management utilities.
"""
# Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Lars Buitinck
# Copyright (c) 2010 Gael Varoquaux
# License: BSD Style, 3 clauses.
import os
import sys
import time
import errno
import shutil
from multiprocessing import util
try:
WindowsError
except NameError:
WindowsError = OSError
def disk_used(path):
""" Return the disk usage in a directory."""
size = 0
for file in os.listdir(path) + ['.']:
stat = os.stat(os.path.join(path, file))
if hasattr(stat, 'st_blocks'):
size += stat.st_blocks * 512
else:
# on some platform st_blocks is not available (e.g., Windows)
# approximate by rounding to next multiple of 512
size += (stat.st_size // 512 + 1) * 512
# We need to convert to int to avoid having longs on some systems (we
    # don't want longs, to avoid problems with SQLite)
return int(size / 1024.)
def memstr_to_bytes(text):
""" Convert a memory text to its value in bytes.
"""
kilo = 1024
units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
try:
size = int(units[text[-1]] * float(text[:-1]))
except (KeyError, ValueError) as e:
raise ValueError(
"Invalid literal for size give: %s (type %s) should be "
"alike '10G', '500M', '50K'." % (text, type(text))) from e
return size
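# A few sanity checks on the unit parsing above (a sketch, not a test suite):
def _demo_memstr_to_bytes():
    assert memstr_to_bytes('1K') == 1024
    assert memstr_to_bytes('10M') == 10 * 1024 ** 2
    assert memstr_to_bytes('2G') == 2 * 1024 ** 3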
def mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
# exception. This mechanism ensures that the sub-process GC has time to
# collect and close the memmaps before we fail.
RM_SUBDIRS_RETRY_TIME = 0.1
RM_SUBDIRS_N_RETRY = 5
def rm_subdirs(path, onerror=None):
"""Remove all subdirectories in this path.
The directory indicated by `path` is left in place, and its subdirectories
are erased.
If onerror is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If onerror is None,
an exception is raised.
"""
# NOTE this code is adapted from the one in shutil.rmtree, and is
# just as fast
names = []
try:
names = os.listdir(path)
except os.error:
if onerror is not None:
onerror(os.listdir, path, sys.exc_info())
else:
raise
for name in names:
fullname = os.path.join(path, name)
delete_folder(fullname, onerror=onerror)
def delete_folder(folder_path, onerror=None, allow_non_empty=True):
"""Utility function to cleanup a temporary folder if it still exists."""
if os.path.isdir(folder_path):
if onerror is not None:
shutil.rmtree(folder_path, False, onerror)
else:
# allow the rmtree to fail once, wait and re-try.
# if the error is raised again, fail
err_count = 0
while True:
files = os.listdir(folder_path)
try:
if len(files) == 0 or allow_non_empty:
shutil.rmtree(
folder_path, ignore_errors=False, onerror=None
)
util.debug(
"Sucessfully deleted {}".format(folder_path))
break
else:
raise OSError(
"Expected empty folder {} but got {} "
"files.".format(folder_path, len(files))
)
except (OSError, WindowsError):
err_count += 1
if err_count > RM_SUBDIRS_N_RETRY:
                        # the folder cannot be deleted right now. It may be
# because some temporary files have not been deleted
# yet.
raise
time.sleep(RM_SUBDIRS_RETRY_TIME)

View file

@@ -0,0 +1,120 @@
"""Utility function to construct a loky.ReusableExecutor with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
"""
# Author: Thomas Moreau <thomas.moreau.2010@gmail.com>
# Copyright: 2017, Thomas Moreau
# License: BSD 3 clause
from ._memmapping_reducer import get_memmapping_reducers
from ._memmapping_reducer import TemporaryResourcesManager
from .externals.loky.reusable_executor import _ReusablePoolExecutor
_executor_args = None
def get_memmapping_executor(n_jobs, **kwargs):
return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
class MemmappingExecutor(_ReusablePoolExecutor):
@classmethod
def get_memmapping_executor(cls, n_jobs, timeout=300, initializer=None,
initargs=(), env=None, temp_folder=None,
context_id=None, **backend_args):
"""Factory for ReusableExecutor with automatic memmapping for large numpy
arrays.
"""
global _executor_args
# Check if we can reuse the executor here instead of deferring the test
        # to loky as the reducers are objects that change at each call.
executor_args = backend_args.copy()
executor_args.update(env if env else {})
executor_args.update(dict(
timeout=timeout, initializer=initializer, initargs=initargs))
reuse = _executor_args is None or _executor_args == executor_args
_executor_args = executor_args
manager = TemporaryResourcesManager(temp_folder)
# reducers access the temporary folder in which to store temporary
        # pickles through a call to manager.resolve_temp_folder_name.
        # Resolving the folder name dynamically is useful to use different
        # folders across calls of the same reusable executor.
job_reducers, result_reducers = get_memmapping_reducers(
unlink_on_gc_collect=True,
temp_folder_resolver=manager.resolve_temp_folder_name,
**backend_args)
_executor, executor_is_reused = super().get_reusable_executor(
n_jobs, job_reducers=job_reducers, result_reducers=result_reducers,
reuse=reuse, timeout=timeout, initializer=initializer,
initargs=initargs, env=env
)
if not executor_is_reused:
# Only set a _temp_folder_manager for new executors. Reused
            # executors already have a _temp_folder_manager that must not
# be re-assigned like that because it is referenced in various
# places in the reducing machinery of the executor.
_executor._temp_folder_manager = manager
if context_id is not None:
# Only register the specified context once we know which manager
# the current executor is using, in order to not register an atexit
# finalizer twice for the same folder.
_executor._temp_folder_manager.register_new_context(context_id)
return _executor
def terminate(self, kill_workers=False):
self.shutdown(kill_workers=kill_workers)
if kill_workers:
# When workers are killed in such a brutal manner, they cannot
# execute the finalizer of their shared memmaps. The refcount of
# those memmaps may be off by an unknown number, so instead of
# decref'ing them, we delete the whole temporary folder, and
# unregister them. There is no risk of PermissionError at folder
# deletion because because at this point, all child processes are
# dead, so all references to temporary memmaps are closed.
# unregister temporary resources from all contexts
with self._submit_resize_lock:
self._temp_folder_manager._unregister_temporary_resources()
self._temp_folder_manager._try_delete_folder(
allow_non_empty=True
)
else:
self._temp_folder_manager._unlink_temporary_resources()
self._temp_folder_manager._try_delete_folder(allow_non_empty=True)
@property
def _temp_folder(self):
        # Legacy property used in tests. Could be removed if we refactored
        # the memmapping tests. SHOULD ONLY BE USED IN TESTS!
        # We cache this property because it is called late in the tests - at
        # this point, all contexts have been unregistered, and
        # resolve_temp_folder_name raises an error.
if getattr(self, '_cached_temp_folder', None) is not None:
return self._cached_temp_folder
else:
self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa
return self._cached_temp_folder
class _TestingMemmappingExecutor(MemmappingExecutor):
"""Wrapper around ReusableExecutor to ease memmapping testing with Pool
and Executor. This is only for testing purposes.
"""
def apply_async(self, func, args):
"""Schedule a func to be run"""
future = self.submit(func, *args)
future.get = future.result
return future
def map(self, f, *args):
return list(super().map(f, *args))
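# Usage sketch: obtain the reusable memmapping executor and submit work
# through the standard concurrent.futures interface; timeout=300 matches the
# idle_worker_timeout default used by LokyBackend.configure.
def _demo_executor():
    executor = get_memmapping_executor(2, timeout=300)
    future = executor.submit(pow, 2, 10)
    return future.result()  # 1024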

View file

View file

@@ -0,0 +1,11 @@
from __future__ import absolute_import
from .cloudpickle import * # noqa
from .cloudpickle_fast import CloudPickler, dumps, dump # noqa
# Conform to the convention used by python serialization libraries, which
# expose their Pickler subclass at top-level under the "Pickler" name.
Pickler = CloudPickler
__version__ = '1.6.0'
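# Round-trip sketch: unlike the stdlib pickle module, dumps() can serialize a
# lambda; the standard unpickler is enough to load it back.
def _demo_roundtrip():
    import pickle
    payload = dumps(lambda x: x + 1)
    return pickle.loads(payload)(41)  # 42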

View file

@@ -0,0 +1,842 @@
"""
This class is defined to override standard pickle functionality
The goals of it follow:
- Serialize lambdas and nested functions to compiled byte code
- Deal with main module correctly
- Deal with other non-serializable objects
It does not include an unpickler, as standard python unpickling suffices.
This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California, Berkeley nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from typing import Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
try: # pragma: no branch
import typing_extensions as _typing_extensions
from typing_extensions import Literal, Final
except ImportError:
_typing_extensions = Literal = Final = None
if sys.version_info >= (3, 5, 3):
from typing import ClassVar
else: # pragma: no cover
ClassVar = None
if sys.version_info >= (3, 8):
from types import CellType
else:
def f():
a = 1
def g():
return a
return g
CellType = type(f().__closure__[0])
# cloudpickle is meant for inter process communication: we expect all
# communicating processes to run the same Python version hence we favor
# communication speed over compatibility:
DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
# Track the provenance of reconstructed dynamic classes to make it possible to
# reconstruct instances from the matching singleton class definition when
# appropriate and preserve the usual "isinstance" semantics of Python objects.
_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
PYPY = platform.python_implementation() == "PyPy"
builtin_code_type = None
if PYPY:
# builtin-code objects only exist in pypy
builtin_code_type = type(float.__new__.__code__)
_extract_code_globals_cache = weakref.WeakKeyDictionary()
def _get_or_create_tracker_id(class_def):
with _DYNAMIC_CLASS_TRACKER_LOCK:
class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
if class_tracker_id is None:
class_tracker_id = uuid.uuid4().hex
_DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
_DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
return class_tracker_id
def _lookup_class_or_track(class_tracker_id, class_def):
if class_tracker_id is not None:
with _DYNAMIC_CLASS_TRACKER_LOCK:
class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
class_tracker_id, class_def)
_DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
return class_def
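# A minimal usage sketch (hypothetical session, assuming this module's
# private helpers are importable): tracking a class and then looking it up
# by its id returns the very same class object, which is what preserves
# ``isinstance`` semantics across pickling round-trips within one process.
#
#   >>> class A:
#   ...     pass
#   >>> tracker_id = _get_or_create_tracker_id(A)
#   >>> _lookup_class_or_track(tracker_id, A) is A
#   True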
def _whichmodule(obj, name):
"""Find the module an object belongs to.
This function differs from ``pickle.whichmodule`` in two ways:
- it does not mangle the cases where obj's module is __main__ and obj was
not found in any module.
- Errors arising during module introspection are ignored, as those errors
are considered unwanted side effects.
"""
if sys.version_info[:2] < (3, 7) and isinstance(obj, typing.TypeVar): # pragma: no branch # noqa
# Workaround bug in old Python versions: prior to Python 3.7,
# T.__module__ would always be set to "typing" even when the TypeVar T
# would be defined in a different module.
#
# For such older Python versions, we ignore the __module__ attribute of
# TypeVar instances and instead exhaustively lookup those instances in
# all currently imported modules.
module_name = None
else:
module_name = getattr(obj, '__module__', None)
if module_name is not None:
return module_name
# Protect the iteration by using a copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr or
# other threads importing at the same time.
for module_name, module in sys.modules.copy().items():
# Some modules such as coverage can inject non-module objects inside
# sys.modules
if (
module_name == '__main__' or
module is None or
not isinstance(module, types.ModuleType)
):
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except Exception:
pass
return None
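# A hedged sketch of the common fast path: for objects that carry a valid
# __module__ attribute, _whichmodule simply returns it without scanning
# sys.modules.
#
#   >>> _whichmodule(len, 'len')
#   'builtins'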
def _is_importable(obj, name=None):
"""Dispatcher utility to test the importability of various constructs."""
if isinstance(obj, types.FunctionType):
return _lookup_module_and_qualname(obj, name=name) is not None
elif issubclass(type(obj), type):
return _lookup_module_and_qualname(obj, name=name) is not None
elif isinstance(obj, types.ModuleType):
# We assume that sys.modules is primarily used as a cache mechanism for
# the Python import machinery. Checking if a module has been added to
# sys.modules is therefore a cheap and simple heuristic to tell us whether
# we can assume that a given module could be imported by name in
# another Python process.
return obj.__name__ in sys.modules
else:
raise TypeError(
"cannot check importability of {} instances".format(
type(obj).__name__)
)
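# A hedged sketch of the three supported cases (hypothetical interactive
# session; outputs assume the objects were not redefined dynamically):
#
#   >>> import os, os.path
#   >>> _is_importable(os)               # module registered in sys.modules
#   True
#   >>> _is_importable(os.path.join)     # top-level function of a module
#   True
#   >>> _is_importable(lambda x: x)      # dynamic function defined in __main__
#   False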
def _lookup_module_and_qualname(obj, name=None):
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None: # pragma: no cover
# This used to be needed for Python 2.7 support but is probably not
# needed anymore. However we keep the __name__ introspection in case
# users of cloudpickle rely on this old behavior for unknown reasons.
name = getattr(obj, '__name__', None)
module_name = _whichmodule(obj, name)
if module_name is None:
# In this case, obj.__module__ is None AND obj was not found in any
# imported module. obj is thus treated as dynamic.
return None
if module_name == "__main__":
return None
# Note: if module_name is in sys.modules, the corresponding module is
# assumed importable at unpickling time. See #357
module = sys.modules.get(module_name, None)
if module is None:
# The main reason why obj's module would not be imported is that this
# module has been dynamically created, using for example
# types.ModuleType. The other possibility is that module was removed
# from sys.modules after obj was created/imported. But this case is not
# supported, as the standard pickle does not support it either.
return None
try:
obj2, parent = _getattribute(module, name)
except AttributeError:
# obj was not found inside the module it points to
return None
if obj2 is not obj:
return None
return module, name
def _extract_code_globals(co):
"""
Find all global names read or written to by code object co.
"""
out_names = _extract_code_globals_cache.get(co)
if out_names is None:
names = co.co_names
out_names = {names[oparg] for _, oparg in _walk_global_ops(co)}
# Declaring a function inside another one using the "def ..."
# syntax generates a constant code object corresponding to the one
# of the nested function. As the nested function may itself need
# global variables, we need to introspect its code, extract its
# globals (by looking for code objects in its co_consts attribute)
# and add the result to out_names.
if co.co_consts:
for const in co.co_consts:
if isinstance(const, types.CodeType):
out_names |= _extract_code_globals(const)
_extract_code_globals_cache[co] = out_names
return out_names
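# A minimal sketch (hypothetical session): globals referenced by nested
# code objects are collected too, while closure variables are not.
#
#   >>> GREETING = "hi"
#   >>> def greet(name):
#   ...     def render():
#   ...         return "%s %s" % (GREETING, name)
#   ...     return render()
#   >>> sorted(_extract_code_globals(greet.__code__))
#   ['GREETING']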
def _find_imported_submodules(code, top_level_dependencies):
"""
Find currently imported submodules used by a function.
Submodules used by a function need to be detected and referenced for the
function to work correctly at depickling time. Because submodules can be
referenced as an attribute of their parent package (``package.submodule``), we
need a special introspection technique that does not rely on GLOBAL-related
opcodes to find references of them in a code object.
Example:
```
import concurrent.futures
import cloudpickle
def func():
x = concurrent.futures.ThreadPoolExecutor
if __name__ == '__main__':
cloudpickle.dumps(func)
```
The globals extracted by cloudpickle in the function's state include the
concurrent package, but not its submodule (here, concurrent.futures), which
is the module used by func. _find_imported_submodules will detect the use
of concurrent.futures. Saving this module alongside func ensures that
calling func once depickled does not fail because concurrent.futures was
not imported.
"""
subimports = []
# check if any known dependency is an imported package
for x in top_level_dependencies:
if (isinstance(x, types.ModuleType) and
hasattr(x, '__package__') and x.__package__):
# check if the package has any currently loaded sub-imports
prefix = x.__name__ + '.'
# A concurrent thread could mutate sys.modules,
# make sure we iterate over a copy to avoid exceptions
for name in list(sys.modules):
# Older versions of pytest will add a "None" module to
# sys.modules.
if name is not None and name.startswith(prefix):
# check whether the function can address the sub-module
tokens = set(name[len(prefix):].split('.'))
if not tokens - set(code.co_names):
subimports.append(sys.modules[name])
return subimports
def cell_set(cell, value):
"""Set the value of a closure cell.
The point of this function is to set the cell_contents attribute of a cell
after its creation. This operation is necessary in case the cell contains a
reference to the function the cell belongs to, as when calling the
function's constructor
``f = types.FunctionType(code, globals, name, argdefs, closure)``,
closure will not be able to contain the yet-to-be-created f.
Starting with Python 3.7, cell_contents is writeable, so setting the
contents of a cell can be done simply using
>>> cell.cell_contents = value
In earlier Python3 versions, the cell_contents attribute of a cell is read
only, but this limitation can be worked around by leveraging the Python 3
``nonlocal`` keyword.
In Python2 however, this attribute is read only, and there is no
``nonlocal`` keyword. For this reason, we need to come up with more
complicated hacks to set this attribute.
The chosen approach is to create a function with a STORE_DEREF opcode,
which sets the content of a closure variable. Typically:
>>> def inner(value):
... lambda: cell # the lambda makes cell a closure
... cell = value # cell is a closure, so this triggers a STORE_DEREF
(Note that in Python2, a STORE_DEREF can never be triggered from an inner
function. The function g, for example here
>>> def f(var):
... def g():
... var += 1
... return g
will not modify the closure variable ``var`` in place, but instead try to
load a local variable var and increment it. As g does not assign the local
variable ``var`` any initial value, calling f(1)() will fail at runtime.)
Our objective is to set the value of a given cell ``cell``. So we need to
somehow reference our ``cell`` object in the ``inner`` function so that
this object (and not the throwaway cell of the lambda function) gets
affected by the STORE_DEREF operation.
In inner, ``cell`` is referenced as a cell variable (an enclosing variable
that is referenced by the inner function). If we create a new function
cell_set with the exact same code as ``inner``, but with ``cell`` marked as
a free variable instead, the STORE_DEREF will be applied on its closure -
``cell``, which we can specify explicitly during construction! The new
cell_set variable thus actually sets the contents of a specified cell!
Note: we do not make use of the ``nonlocal`` keyword to set the contents of
a cell in early python3 versions to limit possible syntax errors in case
test and checker libraries decide to parse the whole file.
"""
if sys.version_info[:2] >= (3, 7): # pragma: no branch
cell.cell_contents = value
else:
_cell_set = types.FunctionType(
_cell_set_template_code, {}, '_cell_set', (), (cell,),)
_cell_set(value)
def _make_cell_set_template_code():
def _cell_set_factory(value):
lambda: cell
cell = value
co = _cell_set_factory.__code__
_cell_set_template_code = types.CodeType(
co.co_argcount,
co.co_kwonlyargcount, # Python 3 only argument
co.co_nlocals,
co.co_stacksize,
co.co_flags,
co.co_code,
co.co_consts,
co.co_names,
co.co_varnames,
co.co_filename,
co.co_name,
co.co_firstlineno,
co.co_lnotab,
co.co_cellvars, # co_freevars is initialized with co_cellvars
(), # co_cellvars is made empty
)
return _cell_set_template_code
if sys.version_info[:2] < (3, 7):
_cell_set_template_code = _make_cell_set_template_code()
# relevant opcodes
STORE_GLOBAL = opcode.opmap['STORE_GLOBAL']
DELETE_GLOBAL = opcode.opmap['DELETE_GLOBAL']
LOAD_GLOBAL = opcode.opmap['LOAD_GLOBAL']
GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
EXTENDED_ARG = dis.EXTENDED_ARG
_BUILTIN_TYPE_NAMES = {}
for k, v in types.__dict__.items():
if type(v) is type:
_BUILTIN_TYPE_NAMES[v] = k
def _builtin_type(name):
if name == "ClassType": # pragma: no cover
# Backward compat to load pickle files generated with cloudpickle
# < 1.3 even if loading pickle files from older versions is not
# officially supported.
return type
return getattr(types, name)
def _walk_global_ops(code):
"""
Yield (opcode, argument number) tuples for all
global-referencing instructions in *code*.
"""
for instr in dis.get_instructions(code):
op = instr.opcode
if op in GLOBAL_OPS:
yield op, instr.arg
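# A hedged sketch (bytecode details shown for CPython 3.8; other versions
# may emit different instructions or argument encodings):
#
#   >>> def f():
#   ...     global counter
#   ...     counter = len("abc")
#   >>> [(dis.opname[op], f.__code__.co_names[arg])
#   ...  for op, arg in _walk_global_ops(f.__code__)]
#   [('LOAD_GLOBAL', 'len'), ('STORE_GLOBAL', 'counter')]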
def _extract_class_dict(cls):
"""Retrieve a copy of the dict of a class without the inherited methods"""
clsdict = dict(cls.__dict__) # copy dict proxy to a dict
if len(cls.__bases__) == 1:
inherited_dict = cls.__bases__[0].__dict__
else:
inherited_dict = {}
for base in reversed(cls.__bases__):
inherited_dict.update(base.__dict__)
to_remove = []
for name, value in clsdict.items():
try:
base_value = inherited_dict[name]
if value is base_value:
to_remove.append(name)
except KeyError:
pass
for name in to_remove:
clsdict.pop(name)
return clsdict
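# A minimal sketch: attributes inherited from the base class are filtered
# out, keeping only what the class defines itself (dunder noise elided in
# this hypothetical session).
#
#   >>> class Base:
#   ...     def ping(self):
#   ...         return "ping"
#   >>> class Child(Base):
#   ...     x = 1
#   >>> sorted(k for k in _extract_class_dict(Child)
#   ...        if not k.startswith('__'))
#   ['x']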
if sys.version_info[:2] < (3, 7): # pragma: no branch
def _is_parametrized_type_hint(obj):
# This is very cheap but might generate false positives.
# General typing constructs
is_typing = getattr(obj, '__origin__', None) is not None
# typing_extensions.Literal
is_literal = getattr(obj, '__values__', None) is not None
# typing_extensions.Final
is_final = getattr(obj, '__type__', None) is not None
# typing.Union/Tuple for old Python 3.5
is_union = getattr(obj, '__union_params__', None) is not None
is_tuple = getattr(obj, '__tuple_params__', None) is not None
is_callable = (
getattr(obj, '__result__', None) is not None and
getattr(obj, '__args__', None) is not None
)
return any((is_typing, is_literal, is_final, is_union, is_tuple,
is_callable))
def _create_parametrized_type_hint(origin, args):
return origin[args]
else:
_is_parametrized_type_hint = None
_create_parametrized_type_hint = None
def parametrized_type_hint_getinitargs(obj):
# The distorted type-check semantics for typing constructs become:
# ``type(obj) is type(TypeHint)``, which means "obj is a
# parametrized TypeHint"
if type(obj) is type(Literal): # pragma: no branch
initargs = (Literal, obj.__values__)
elif type(obj) is type(Final): # pragma: no branch
initargs = (Final, obj.__type__)
elif type(obj) is type(ClassVar):
initargs = (ClassVar, obj.__type__)
elif type(obj) is type(Generic):
parameters = obj.__parameters__
if len(obj.__parameters__) > 0:
# in early Python 3.5, __parameters__ was sometimes
# preferred to __args__
initargs = (obj.__origin__, parameters)
else:
initargs = (obj.__origin__, obj.__args__)
elif type(obj) is type(Union):
if sys.version_info < (3, 5, 3): # pragma: no cover
initargs = (Union, obj.__union_params__)
else:
initargs = (Union, obj.__args__)
elif type(obj) is type(Tuple):
if sys.version_info < (3, 5, 3): # pragma: no cover
initargs = (Tuple, obj.__tuple_params__)
else:
initargs = (Tuple, obj.__args__)
elif type(obj) is type(Callable):
if sys.version_info < (3, 5, 3): # pragma: no cover
args = obj.__args__
result = obj.__result__
if args != Ellipsis:
if isinstance(args, tuple):
args = list(args)
else:
args = [args]
else:
(*args, result) = obj.__args__
if len(args) == 1 and args[0] is Ellipsis:
args = Ellipsis
else:
args = list(args)
initargs = (Callable, (args, result))
else: # pragma: no cover
raise pickle.PicklingError(
"Cloudpickle Error: Unknown type {}".format(type(obj))
)
return initargs
# Tornado support
def is_tornado_coroutine(func):
"""
Return whether *func* is a Tornado coroutine function.
Running coroutines are not supported.
"""
if 'tornado.gen' not in sys.modules:
return False
gen = sys.modules['tornado.gen']
if not hasattr(gen, "is_coroutine_function"):
# Tornado version is too old
return False
return gen.is_coroutine_function(func)
def _rebuild_tornado_coroutine(func):
from tornado import gen
return gen.coroutine(func)
# Expose pickle's loading functions in this namespace
load = pickle.load
loads = pickle.loads
# hack for __import__ not working as desired
def subimport(name):
__import__(name)
return sys.modules[name]
def dynamic_subimport(name, vars):
mod = types.ModuleType(name)
mod.__dict__.update(vars)
mod.__dict__['__builtins__'] = builtins.__dict__
return mod
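# A hedged sketch: subimport re-imports a module by dotted name at
# unpickling time, while dynamic_subimport rebuilds a module that never
# lived on disk ('fake_mod' is a hypothetical name used for illustration
# only).
#
#   >>> subimport('os.path') is sys.modules['os.path']
#   True
#   >>> mod = dynamic_subimport('fake_mod', {'answer': 42})
#   >>> mod.answer
#   42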
def _gen_ellipsis():
return Ellipsis
def _gen_not_implemented():
return NotImplemented
def _get_cell_contents(cell):
try:
return cell.cell_contents
except ValueError:
# sentinel used by ``_fill_function`` which will leave the cell empty
return _empty_cell_value
def instance(cls):
"""Create a new instance of a class.
Parameters
----------
cls : type
The class to create an instance of.
Returns
-------
instance : cls
A new instance of ``cls``.
"""
return cls()
@instance
class _empty_cell_value(object):
"""sentinel for empty closures
"""
@classmethod
def __reduce__(cls):
return cls.__name__
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is create by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_function arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
if 'kwdefaults' in state:
func.__kwdefaults__ = state['kwdefaults']
# _cloudpickle_submodules is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
if '_cloudpickle_submodules' in state:
state.pop('_cloudpickle_submodules')
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func
def _make_empty_cell():
if False:
# trick the compiler into creating an empty cell in our lambda
cell = None
raise AssertionError('this route should not be executed')
return (lambda: cell).__closure__[0]
def _make_cell(value=_empty_cell_value):
cell = _make_empty_cell()
if value is not _empty_cell_value:
cell_set(cell, value)
return cell
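# A minimal sketch of the two cell constructors used when rebuilding
# closures (hypothetical session; the ValueError message is CPython's):
#
#   >>> _make_cell(42).cell_contents
#   42
#   >>> _make_empty_cell().cell_contents
#   Traceback (most recent call last):
#   ...
#   ValueError: Cell is empty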
def _make_skel_func(code, cell_count, base_globals=None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
# This function is deprecated and should be removed in cloudpickle 1.7
warnings.warn(
"A pickle file created using an old (<=1.4.1) version of cloudpicke "
"is currently being loaded. This is not supported by cloudpickle and "
"will break in cloudpickle 1.7", category=UserWarning
)
# This is backward-compatibility code: for cloudpickle versions between
# 0.5.4 and 0.7, base_globals could be a string or None. base_globals
# should now always be a dictionary.
if base_globals is None or isinstance(base_globals, str):
base_globals = {}
base_globals['__builtins__'] = __builtins__
closure = (
tuple(_make_empty_cell() for _ in range(cell_count))
if cell_count >= 0 else
None
)
return types.FunctionType(code, base_globals, None, None, closure)
def _make_skeleton_class(type_constructor, name, bases, type_kwargs,
class_tracker_id, extra):
"""Build dynamic class with an empty __dict__ to be filled once memoized
If class_tracker_id is not None, try to look up an existing class definition
matching that id. If none is found, track a newly reconstructed class
definition under that id so that other instances stemming from the same
class id will also reuse this class definition.
The "extra" variable is meant to be a dict (or None) that can be used for
forward compatibility should the need arise.
"""
skeleton_class = types.new_class(
name, bases, {'metaclass': type_constructor},
lambda ns: ns.update(type_kwargs)
)
return _lookup_class_or_track(class_tracker_id, skeleton_class)
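# A hedged sketch: the skeleton class carries only its name, bases and type
# kwargs; its attributes are restored later by the matching state setter.
#
#   >>> cls = _make_skeleton_class(type, 'Point', (object,), {}, None, None)
#   >>> (cls.__name__, cls.__bases__ == (object,))
#   ('Point', True)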
def _rehydrate_skeleton_class(skeleton_class, class_dict):
"""Put attributes from `class_dict` back on `skeleton_class`.
See CloudPickler.save_dynamic_class for more info.
"""
registry = None
for attrname, attr in class_dict.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(skeleton_class, attrname, attr)
if registry is not None:
for subclass in registry:
skeleton_class.register(subclass)
return skeleton_class
def _make_skeleton_enum(bases, name, qualname, members, module,
class_tracker_id, extra):
"""Build dynamic enum with an empty __dict__ to be filled once memoized
The creation of the enum class is inspired by the code of
EnumMeta._create_.
If class_tracker_id is not None, try to look up an existing enum definition
matching that id. If none is found, track a newly reconstructed enum
definition under that id so that other instances stemming from the same
class id will also reuse this enum definition.
The "extra" variable is meant to be a dict (or None) that can be used for
forward compatibility should the need arise.
"""
# enums always inherit from their base Enum class at the last position in
# the list of base classes:
enum_base = bases[-1]
metacls = enum_base.__class__
classdict = metacls.__prepare__(name, bases)
for member_name, member_value in members.items():
classdict[member_name] = member_value
enum_class = metacls.__new__(metacls, name, bases, classdict)
enum_class.__module__ = module
enum_class.__qualname__ = qualname
return _lookup_class_or_track(class_tracker_id, enum_class)
def _make_typevar(name, bound, constraints, covariant, contravariant,
class_tracker_id):
tv = typing.TypeVar(
name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant
)
if class_tracker_id is not None:
return _lookup_class_or_track(class_tracker_id, tv)
else: # pragma: nocover
# Only for Python 3.5.3 compat.
return tv
def _decompose_typevar(obj):
try:
class_tracker_id = _get_or_create_tracker_id(obj)
except TypeError: # pragma: nocover
# TypeVar instances are not weakref-able in Python 3.5.3
class_tracker_id = None
return (
obj.__name__, obj.__bound__, obj.__constraints__,
obj.__covariant__, obj.__contravariant__,
class_tracker_id,
)
def _typevar_reduce(obj):
# TypeVar instances have no __qualname__ hence we pass the name explicitly.
module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
if module_and_name is None:
return (_make_typevar, _decompose_typevar(obj))
return (getattr, module_and_name)
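# A minimal sketch: importable TypeVars reduce to an attribute lookup,
# dynamic ones to a full reconstruction (behavior shown for Python 3.7+ in
# a hypothetical __main__ session).
#
#   >>> reconstructor, args = _typevar_reduce(typing.AnyStr)
#   >>> reconstructor is getattr
#   True
#   >>> T = typing.TypeVar('T')  # defined in __main__, hence dynamic
#   >>> _typevar_reduce(T)[0] is _make_typevar
#   True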
def _get_bases(typ):
if hasattr(typ, '__orig_bases__'):
# For generic types (see PEP 560)
bases_attr = '__orig_bases__'
else:
# For regular class objects
bases_attr = '__bases__'
return getattr(typ, bases_attr)
def _make_dict_keys(obj):
return dict.fromkeys(obj).keys()
def _make_dict_values(obj):
return {i: _ for i, _ in enumerate(obj)}.values()
def _make_dict_items(obj):
return obj.items()
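# A hedged sketch of the dict-view reconstructors (note that only the keys,
# values or items that were shipped are rebuilt, never the backing dict):
#
#   >>> list(_make_dict_keys(['a', 'b']))
#   ['a', 'b']
#   >>> list(_make_dict_values([1, 2]))
#   [1, 2]
#   >>> list(_make_dict_items({'a': 1}))
#   [('a', 1)]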

View file

@ -0,0 +1,770 @@
"""
New, fast version of the CloudPickler.
This new CloudPickler class can now extend the fast C Pickler instead of the
previous Python implementation of the Pickler class. Because this functionality
is only available for Python versions 3.8+, a lot of backward-compatibility
code is also removed.
Note that the C Pickler subclassing API is CPython-specific. Therefore, some
guards present in cloudpickle.py that were written to handle PyPy specificities
are not present in cloudpickle_fast.py
"""
import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _is_importable,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items,
)
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None, buffer_callback=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
).dump(obj)
def dumps(obj, protocol=None, buffer_callback=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
with io.BytesIO() as file:
cp = CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
)
cp.dump(obj)
return file.getvalue()
else:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(file, protocol=protocol).dump(obj)
def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
with io.BytesIO() as file:
cp = CloudPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue()
load, loads = pickle.load, pickle.loads
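# A hedged usage sketch (hypothetical session, assuming this module is
# importable as ``cloudpickle``): round-trip a lambda, which the stdlib
# pickle cannot serialize by itself.
#
#   >>> import cloudpickle
#   >>> double = cloudpickle.loads(cloudpickle.dumps(lambda x: 2 * x))
#   >>> double(21)
#   42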
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------
def _class_getnewargs(obj):
type_kwargs = {}
if "__slots__" in obj.__dict__:
type_kwargs["__slots__"] = obj.__slots__
__dict__ = obj.__dict__.get('__dict__', None)
if isinstance(__dict__, property):
type_kwargs['__dict__'] = __dict__
return (type(obj), obj.__name__, _get_bases(obj), type_kwargs,
_get_or_create_tracker_id(obj), None)
def _enum_getnewargs(obj):
members = dict((e.name, e.value) for e in obj)
return (obj.__bases__, obj.__name__, obj.__qualname__, members,
obj.__module__, _get_or_create_tracker_id(obj), None)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
# - Put func's dynamic attributes (stored in func.__dict__) in state. These
# attributes will be restored at unpickling time using
# f.__dict__.update(state)
# - Put func's members into slotstate. Such attributes will be restored at
# unpickling time by iterating over slotstate and calling setattr(func,
# slotname, slotvalue)
slotstate = {
"__name__": func.__name__,
"__qualname__": func.__qualname__,
"__annotations__": func.__annotations__,
"__kwdefaults__": func.__kwdefaults__,
"__defaults__": func.__defaults__,
"__module__": func.__module__,
"__doc__": func.__doc__,
"__closure__": func.__closure__,
}
f_globals_ref = _extract_code_globals(func.__code__)
f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in
func.__globals__}
closure_values = (
list(map(_get_cell_contents, func.__closure__))
if func.__closure__ is not None else ()
)
# Extract currently-imported submodules used by func. Storing these modules
# in a transient _cloudpickle_submodules entry of the object's state will
# trigger the side effect of importing these modules at unpickling time
# (which is necessary for func to work correctly once depickled).
slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
func.__code__, itertools.chain(f_globals.values(), closure_values))
slotstate["__globals__"] = f_globals
state = func.__dict__
return state, slotstate
def _class_getstate(obj):
clsdict = _extract_class_dict(obj)
clsdict.pop('__weakref__', None)
if issubclass(type(obj), abc.ABCMeta):
# If obj is an instance of an ABCMeta subclass, don't pickle the
# cache/negative caches populated during isinstance/issubclass
# checks, but pickle the list of registered subclasses of obj.
clsdict.pop('_abc_cache', None)
clsdict.pop('_abc_negative_cache', None)
clsdict.pop('_abc_negative_cache_version', None)
registry = clsdict.pop('_abc_registry', None)
if registry is None:
# in Python3.7+, the abc caches and registered subclasses of a
# class are bundled into the single _abc_impl attribute
clsdict.pop('_abc_impl', None)
(registry, _, _, _) = abc._get_dump(obj)
clsdict["_abc_impl"] = [subclass_weakref()
for subclass_weakref in registry]
else:
# In the above if clause, registry is a set of weakrefs -- in
# this case, registry is a WeakSet
clsdict["_abc_impl"] = [type_ for type_ in registry]
if "__slots__" in clsdict:
# pickle string length optimization: member descriptors of obj are
# created automatically from obj's __slots__ attribute, no need to
# save them in obj's state
if isinstance(obj.__slots__, str):
clsdict.pop(obj.__slots__)
else:
for k in obj.__slots__:
clsdict.pop(k, None)
clsdict.pop('__dict__', None) # unpicklable property object
return (clsdict, {})
def _enum_getstate(obj):
clsdict, slotstate = _class_getstate(obj)
members = dict((e.name, e.value) for e in obj)
# Cleanup the clsdict that will be passed to _rehydrate_skeleton_class:
# Those attributes are already handled by the metaclass.
for attrname in ["_generate_next_value_", "_member_names_",
"_member_map_", "_member_type_",
"_value2member_map_"]:
clsdict.pop(attrname, None)
for member in members:
clsdict.pop(member)
# Special handling of Enum subclasses
return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exceptions objects, instances of the "object"
# class, etc), are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".
def _code_reduce(obj):
"""codeobject reducer"""
if hasattr(obj, "co_posonlyargcount"): # pragma: no branch
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
return types.CodeType, args
def _cell_reduce(obj):
"""Cell (containing values of a function's free variables) reducer"""
try:
obj.cell_contents
except ValueError: # cell is empty
return _make_empty_cell, ()
else:
return _make_cell, (obj.cell_contents, )
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
"""Save a file"""
import io
if not hasattr(obj, "name") or not hasattr(obj, "mode"):
raise pickle.PicklingError(
"Cannot pickle files that do not map to an actual file"
)
if obj is sys.stdout:
return getattr, (sys, "stdout")
if obj is sys.stderr:
return getattr, (sys, "stderr")
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, "isatty") and obj.isatty():
raise pickle.PicklingError(
"Cannot pickle files that map to tty objects"
)
if "r" not in obj.mode and "+" not in obj.mode:
raise pickle.PicklingError(
"Cannot pickle files that are not opened for reading: %s"
% obj.mode
)
name = obj.name
retval = io.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError as e:
raise pickle.PicklingError(
"Cannot pickle file %s as it cannot be read" % name
) from e
retval.write(contents)
retval.seek(curloc)
retval.name = name
return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
if _is_importable(obj):
return subimport, (obj.__name__,)
else:
obj.__dict__.pop('__builtins__', None)
return dynamic_subimport, (obj.__name__, vars(obj))
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
"""
Save a class that can't be stored as module global.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from global modules.
"""
if Enum is not None and issubclass(obj, Enum):
return (
_make_skeleton_enum, _enum_getnewargs(obj), _enum_getstate(obj),
None, None, _class_setstate
)
else:
return (
_make_skeleton_class, _class_getnewargs(obj), _class_getstate(obj),
None, None, _class_setstate
)
def _class_reduce(obj):
"""Select the reducer depending on the dynamic nature of the class obj"""
if obj is type(None): # noqa
return type, (None,)
elif obj is type(Ellipsis):
return type, (Ellipsis,)
elif obj is type(NotImplemented):
return type, (NotImplemented,)
elif obj in _BUILTIN_TYPE_NAMES:
return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
elif not _is_importable(obj):
return _dynamic_class_reduce(obj)
return NotImplemented
def _dict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj), )
def _dict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj), )
def _dict_items_reduce(obj):
return _make_dict_items, (dict(obj), )
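# A minimal sketch of the reducer contract: calling the returned callable
# on the returned arguments rebuilds an equivalent object (hypothetical
# session using the mappingproxy reducer defined above):
#
#   >>> proxy = type('T', (), {}).__dict__
#   >>> reconstructor, args = _mappingproxy_reduce(proxy)
#   >>> type(reconstructor(*args)).__name__
#   'mappingproxy'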
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at pickling time.
def _function_setstate(obj, state):
"""Update the state of a dynaamic function.
As __closure__ and __globals__ are readonly attributes of a function, we
cannot rely on the native setstate routine of pickle.load_build, that calls
setattr on items of the slotstate. Instead, we have to modify them inplace.
"""
state, slotstate = state
obj.__dict__.update(state)
obj_globals = slotstate.pop("__globals__")
obj_closure = slotstate.pop("__closure__")
# _cloudpickle_submodules is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
slotstate.pop("_cloudpickle_submodules")
obj.__globals__.update(obj_globals)
obj.__globals__["__builtins__"] = __builtins__
if obj_closure is not None:
for i, cell in enumerate(obj_closure):
try:
value = cell.cell_contents
except ValueError: # cell is empty
continue
cell_set(obj.__closure__[i], value)
for k, v in slotstate.items():
setattr(obj, k, v)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(obj, attrname, attr)
if registry is not None:
for subclass in registry:
obj.register(subclass)
return obj
class CloudPickler(Pickler):
# set of reducers defined and used by cloudpickle (private)
_dispatch_table = {}
_dispatch_table[classmethod] = _classmethod_reduce
_dispatch_table[io.TextIOWrapper] = _file_reduce
_dispatch_table[logging.Logger] = _logger_reduce
_dispatch_table[logging.RootLogger] = _root_logger_reduce
_dispatch_table[memoryview] = _memoryview_reduce
_dispatch_table[property] = _property_reduce
_dispatch_table[staticmethod] = _classmethod_reduce
_dispatch_table[CellType] = _cell_reduce
_dispatch_table[types.CodeType] = _code_reduce
_dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
_dispatch_table[types.ModuleType] = _module_reduce
_dispatch_table[types.MethodType] = _method_reduce
_dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
_dispatch_table[weakref.WeakSet] = _weakset_reduce
_dispatch_table[typing.TypeVar] = _typevar_reduce
_dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
_dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
_dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
# function reducers are defined as instance methods of CloudPickler
# objects, as they rely on a CloudPickler attribute (globals_ref)
def _dynamic_function_reduce(self, func):
"""Reduce a function that is not pickleable via attribute lookup."""
newargs = self._function_getnewargs(func)
state = _function_getstate(func)
return (types.FunctionType, newargs, state, None, None,
_function_setstate)
def _function_reduce(self, obj):
"""Reducer for function objects.
If obj is a top-level attribute of a file-backed module, this
reducer returns NotImplemented, making the CloudPickler fallback to
traditional _pickle.Pickler routines to save obj. Otherwise, it reduces
obj using a custom cloudpickle reducer designed specifically to handle
dynamic functions.
As opposed to cloudpickle.py, there is no special handling for builtin
pypy functions, because cloudpickle_fast is CPython-specific.
"""
if _is_importable(obj):
return NotImplemented
else:
return self._dynamic_function_reduce(obj)
def _function_getnewargs(self, func):
code = func.__code__
# base_globals represents the future global namespace of func at
# unpickling time. Looking it up and storing it in
# CloudPickler.globals_ref allows functions sharing the same globals
# at pickling time to also share them once unpickled, on one condition:
# since globals_ref is an attribute of a CloudPickler instance, and
# that a new CloudPickler is created each time pickle.dump or
# pickle.dumps is called, functions also need to be saved within the
# same invocation of cloudpickle.dump/cloudpickle.dumps (for example:
# cloudpickle.dumps([f1, f2])). There is no such limitation when using
# CloudPickler.dump, as long as the multiple invocations are bound to
# the same CloudPickler.
base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
if base_globals == {}:
# Add module attributes used to resolve relative imports
# instructions inside func.
for k in ["__package__", "__name__", "__path__", "__file__"]:
if k in func.__globals__:
base_globals[k] = func.__globals__[k]
# Do not bind the free variables before the function is created to
# avoid infinite recursion.
if func.__closure__ is None:
closure = None
else:
closure = tuple(
_make_empty_cell() for _ in range(len(code.co_freevars)))
return code, base_globals, None, None, closure
def dump(self, obj):
try:
return Pickler.dump(self, obj)
except RuntimeError as e:
if "recursion" in e.args[0]:
msg = (
"Could not pickle object as excessively deep recursion "
"required."
)
raise pickle.PicklingError(msg) from e
else:
raise
if pickle.HIGHEST_PROTOCOL >= 5:
# `CloudPickler.dispatch` is only left for backward compatibility - note
# that when using protocol 5, `CloudPickler.dispatch` is not an
# extension of `Pickler.dispatch` dictionary, because CloudPickler
# subclasses the C-implemented Pickler, which does not expose a
# `dispatch` attribute. Earlier versions of the protocol 5 CloudPickler
# used `CloudPickler.dispatch` as a class-level attribute storing all
# reducers implemented by cloudpickle, but the attribute name was not a
# great choice given the meaning of `CloudPickler.dispatch` when
# `CloudPickler` extends the pure-python pickler.
dispatch = dispatch_table
# Implementation of the reducer_override callback, in order to
# efficiently serialize dynamic functions and classes by subclassing
# the C-implemented Pickler.
# TODO: decorrelate reducer_override (which is tied to CPython's
# implementation - would it make sense to backport it to pypy?) and
# pickle's protocol 5 (which is implementation agnostic). Currently, the
# availability of both notions coincides on CPython's pickle and the
# pickle5 backport, but it may not be the case anymore when pypy
# implements protocol 5.
def __init__(self, file, protocol=None, buffer_callback=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(
self, file, protocol=protocol, buffer_callback=buffer_callback
)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
self.proto = int(protocol)
def reducer_override(self, obj):
"""Type-agnostic reducing callback for function and classes.
For performance reasons, subclasses of the C _pickle.Pickler class
cannot register custom reducers for functions and classes in the
dispatch_table. Reducers for such types must instead be implemented
in the special reducer_override method.
Note that this method will be called for any object except a few
builtin-types (int, lists, dicts etc.), which differs from reducers
in the Pickler's dispatch_table, each of them being invoked for
objects of a specific type only.
This property comes in handy for classes: although most classes are
instances of the ``type`` metaclass, some of them can be instances
of other custom metaclasses (such as enum.EnumMeta for example). In
particular, the metaclass will likely not be known in advance, and
thus cannot be special-cased using an entry in the dispatch_table.
reducer_override, among other things, allows us to register a
reducer that will be called for any class, independently of its
type.
Notes:
* reducer_override takes priority over dispatch_table-registered
reducers.
* reducer_override can be used to fix limitations of cloudpickle
for types that previously suffered from type-specific reducers,
such as Exceptions. See
https://github.com/cloudpipe/cloudpickle/issues/248
"""
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
return (
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj)
)
t = type(obj)
try:
is_anyclass = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
is_anyclass = False
if is_anyclass:
return _class_reduce(obj)
elif isinstance(obj, types.FunctionType):
return self._function_reduce(obj)
else:
# fallback to save_global, including the Pickler's
# dispatch_table
return NotImplemented
else:
# When reducer_override is not available, hack the pure-Python
# Pickler's types.FunctionType and type savers. Note: the type saver
# must override Pickler.save_global, because pickle.py contains a
# hard-coded call to save_global when pickling meta-classes.
dispatch = Pickler.dispatch.copy()
def __init__(self, file, protocol=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(self, file, protocol=protocol)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
assert hasattr(self, 'proto')
def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
dictitems=None, state_setter=None, obj=None):
save = self.save
write = self.write
self.save_reduce(
func, args, state=None, listitems=listitems,
dictitems=dictitems, obj=obj
)
# backport of the Python 3.8 state_setter pickle operations
save(state_setter)
save(obj) # simple BINGET opcode as obj is already memoized.
save(state)
write(pickle.TUPLE2)
# Trigger a state_setter(obj, state) function call.
write(pickle.REDUCE)
# The purpose of state_setter is to carry out an
# in-place modification of obj. We do not care about what the
# method might return, so its output is eventually removed from
# the stack.
write(pickle.POP)
def save_global(self, obj, name=None, pack=struct.pack):
"""
Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here.
"""
if obj is type(None): # noqa
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(Ellipsis):
return self.save_reduce(type, (Ellipsis,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(
_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
# Parametrized typing constructs in Python < 3.7 are not
# compatible with type checks and ``isinstance`` semantics. For
# this reason, it is easier to detect them using a
# duck-typing-based check (``_is_parametrized_type_hint``) than
# to populate the Pickler's dispatch with type-specific savers.
self.save_reduce(
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj),
obj=obj
)
elif name is not None:
Pickler.save_global(self, obj, name=name)
elif not _is_importable(obj, name=name):
self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
else:
Pickler.save_global(self, obj, name=name)
dispatch[type] = save_global
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
if _is_importable(obj, name=name):
return Pickler.save_global(self, obj, name=name)
elif PYPY and isinstance(obj.__code__, builtin_code_type):
return self.save_pypy_builtin_func(obj)
else:
return self._save_reduce_pickle5(
*self._dynamic_function_reduce(obj), obj=obj
)
def save_pypy_builtin_func(self, obj):
"""Save pypy equivalent of builtin functions.
PyPy does not have the concept of builtin-functions. Instead,
builtin-functions are simple function instances, but with a
builtin-code attribute.
Most of the time, builtin functions should be pickled by attribute.
But PyPy has flaky support for __qualname__, so some builtin
functions such as float.__new__ will be classified as dynamic. For
this reason only, we created this special routine. Because
builtin-functions are not expected to have closures or globals,
there is no additional hack (compared to the one already implemented
in pickle) to protect ourselves from reference cycles. A simple
(reconstructor, newargs, obj.__dict__) tuple is passed to
save_reduce. Note also that PyPy improved their support for
__qualname__ in v3.6, so this routine should be removed when
cloudpickle supports only PyPy 3.6 and later.
"""
rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
obj.__defaults__, obj.__closure__),
obj.__dict__)
self.save_reduce(*rv, obj=obj)
dispatch[types.FunctionType] = save_function
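# A hedged usage sketch: CloudPickler is driven exactly like a stdlib
# Pickler, and its output is read back with the plain pickle module
# (hypothetical session; loading requires cloudpickle to be importable).
#
#   >>> import io
#   >>> buffer = io.BytesIO()
#   >>> CloudPickler(buffer).dump([lambda: 1, lambda: 2])
#   >>> [f() for f in pickle.loads(buffer.getvalue())]
#   [1, 2]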

View file

@ -0,0 +1,13 @@
import sys
if sys.version_info < (3, 8):
try:
import pickle5 as pickle # noqa: F401
from pickle5 import Pickler # noqa: F401
except ImportError:
import pickle # noqa: F401
from pickle import _Pickler as Pickler # noqa: F401
else:
import pickle # noqa: F401
from _pickle import Pickler # noqa: F401

View file

@ -0,0 +1,25 @@
r"""The :mod:`loky` module manages a pool of worker that can be re-used across time.
It provides a robust and dynamic implementation os the
:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
hide the pool management under the hood.
"""
from ._base import Executor, Future
from ._base import wait, as_completed
from ._base import TimeoutError, CancelledError
from ._base import ALL_COMPLETED, FIRST_COMPLETED, FIRST_EXCEPTION
from .backend.context import cpu_count
from .backend.reduction import set_loky_pickler
from .reusable_executor import get_reusable_executor
from .cloudpickle_wrapper import wrap_non_picklable_objects
from .process_executor import BrokenProcessPool, ProcessPoolExecutor
__all__ = ["get_reusable_executor", "cpu_count", "wait", "as_completed",
"Future", "Executor", "ProcessPoolExecutor",
"BrokenProcessPool", "CancelledError", "TimeoutError",
"FIRST_COMPLETED", "FIRST_EXCEPTION", "ALL_COMPLETED",
"wrap_non_picklable_objects", "set_loky_pickler"]
__version__ = '2.9.0'
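# A hedged usage sketch (hypothetical session, assuming loky is installed):
#
#   >>> from loky import get_reusable_executor
#   >>> executor = get_reusable_executor(max_workers=2)
#   >>> executor.submit(pow, 2, 10).result()
#   1024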

View file

@ -0,0 +1,627 @@
###############################################################################
# Backport concurrent.futures for python2.7/3.3
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from concurrent/futures/_base.py (17/02/2017)
# * Do not use yield from
# * Use old super syntax
#
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import sys
import time
import logging
import threading
import collections
if sys.version_info[:2] >= (3, 3):
from concurrent.futures import wait, as_completed
from concurrent.futures import TimeoutError, CancelledError
from concurrent.futures import Executor, Future as _BaseFuture
from concurrent.futures import FIRST_EXCEPTION
from concurrent.futures import ALL_COMPLETED, FIRST_COMPLETED
from concurrent.futures._base import LOGGER
from concurrent.futures._base import PENDING, RUNNING, CANCELLED
from concurrent.futures._base import CANCELLED_AND_NOTIFIED, FINISHED
else:
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions.
"""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED]
for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count,
stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count,
stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different
Executors) to iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete
(finished or cancelled). If any given Futures are duplicated, they
will be returned once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError('%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different
Executors) to wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The
options are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are
cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (finished or were cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
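# A minimal usage sketch (illustrative only): block until the first future
# completes, then inspect the two returned sets. `executor` is assumed to
# be any concrete Executor subclass.
#
#   >>> fs = [executor.submit(pow, 2, n) for n in range(4)]
#   >>> done, not_done = wait(fs, return_when=FIRST_COMPLETED)
#   >>> all(f.done() for f in done)
#   True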
class _BaseFuture(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<%s at %#x state=%s raised %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<%s at %#x state=%s returned %s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<%s at %#x state=%s>' % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future was cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing.
"""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The
callable will always be called by a thread in the same
process in which it was added. If the future has already
completed or been cancelled then the callable will be
called immediately. These callables are called in the order
that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the
future isn't done. If None, then there is no limit on the
wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the
given timeout.
Exception: If the call raised then that exception will be
raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future
represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the
wait time.
Returns:
The exception raised by the call that the future represents or
None if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the
given timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through
        calls to as_completed() or wait()) are notified and False is
returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method
returns False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if
set_result() or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors.
"""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and
returns a Future instance representing the execution of the
callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns an iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then
there is no limit on the wait time.
chunksize: The size of the chunks the iterable will be broken
into before being passed to a child process. This argument
is only used by ProcessPoolExecutor; it is ignored by
ThreadPoolExecutor.
Returns:
            An iterator equivalent to: map(fn, *iterables) but the calls
may be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be
generated before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by
the executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
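# A minimal usage sketch (illustrative only) of the context-manager
# protocol defined above. ThreadPoolExecutor stands in for any concrete
# Executor subclass; it is not defined in this module.
#
#   >>> with ThreadPoolExecutor(max_workers=2) as executor:
#   ...     results = list(executor.map(abs, [-1, -2, 3]))
#   >>> results
#   [1, 2, 3]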
# To make loky._base.Future instances awaitable by concurrent.futures.wait,
# derive our custom Future class from _BaseFuture. _invoke_callback is the only
# modification made to this class in loky.
class Future(_BaseFuture):
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except BaseException:
LOGGER.exception('exception calling callback for %r', self)

View file

@ -0,0 +1,16 @@
import os
import sys
from .context import get_context
if sys.version_info > (3, 4):
def _make_name():
name = '/loky-%i-%s' % (os.getpid(), next(synchronize.SemLock._rand))
return name
# monkey patch the name creation for multiprocessing
from multiprocessing import synchronize
synchronize.SemLock._make_name = staticmethod(_make_name)
__all__ = ["get_context"]

View file

@ -0,0 +1,76 @@
###############################################################################
# Extra reducers for Unix based system and connections objects
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Add adapted reduction for LokyProcesses and socket/Connection
#
import os
import sys
import socket
import _socket
from .reduction import register
from .context import get_spawning_popen
if sys.version_info >= (3, 3):
from multiprocessing.connection import Connection
else:
from _multiprocessing import Connection
HAVE_SEND_HANDLE = (hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg'))
def _mk_inheritable(fd):
if sys.version_info[:2] > (3, 3):
os.set_inheritable(fd, True)
return fd
def DupFd(fd):
'''Return a wrapper for an fd.'''
popen_obj = get_spawning_popen()
if popen_obj is not None:
return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
elif HAVE_SEND_HANDLE and sys.version_info[:2] > (3, 3):
from multiprocessing import resource_sharer
return resource_sharer.DupFd(fd)
else:
raise TypeError(
'Cannot pickle connection object. This object can only be '
'passed when spawning a new process'
)
if sys.version_info[:2] != (3, 3):
def _reduce_socket(s):
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto)
def _rebuild_socket(df, family, type, proto):
fd = df.detach()
return socket.fromfd(fd, family, type, proto)
else:
from multiprocessing.reduction import reduce_socket as _reduce_socket
register(socket.socket, _reduce_socket)
register(_socket.socket, _reduce_socket)
if sys.version_info[:2] != (3, 3):
def reduce_connection(conn):
df = DupFd(conn.fileno())
return rebuild_connection, (df, conn.readable, conn.writable)
def rebuild_connection(df, readable, writable):
fd = df.detach()
return Connection(fd, readable, writable)
else:
from multiprocessing.reduction import reduce_connection
register(Connection, reduce_connection)

View file

@ -0,0 +1,105 @@
###############################################################################
# Compat for wait function on UNIX based system
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/connection.py (17/02/2017)
# * Backport wait function to python2.7
#
import platform
import select
import socket
import errno
SYSTEM = platform.system()
try:
import ctypes
except ImportError: # pragma: no cover
ctypes = None # noqa
if SYSTEM == 'Darwin' and ctypes is not None:
from ctypes.util import find_library
libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
absolute_to_nanoseconds.restype = ctypes.c_uint64
absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]
def monotonic():
return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9
elif SYSTEM == 'Linux' and ctypes is not None:
# from stackoverflow:
# questions/1205722/how-do-i-get-monotonic-time-durations-in-python
import ctypes
import os
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long),
]
librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [
ctypes.c_int, ctypes.POINTER(timespec),
]
def monotonic(): # noqa
t = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
raise OSError(errno_, os.strerror(errno_))
return t.tv_sec + t.tv_nsec * 1e-9
else: # pragma: no cover
from time import time as monotonic
if hasattr(select, 'poll'):
def _poll(fds, timeout):
if timeout is not None:
timeout = int(timeout * 1000) # timeout is in milliseconds
fd_map = {}
pollster = select.poll()
for fd in fds:
pollster.register(fd, select.POLLIN)
if hasattr(fd, 'fileno'):
fd_map[fd.fileno()] = fd
else:
fd_map[fd] = fd
ls = []
for fd, event in pollster.poll(timeout):
if event & select.POLLNVAL: # pragma: no cover
raise ValueError('invalid file descriptor %i' % fd)
ls.append(fd_map[fd])
return ls
else:
def _poll(fds, timeout):
return select.select(fds, [], [], timeout)[0]
def wait(object_list, timeout=None):
'''
    Wait until an object in object_list is ready/readable.
    Returns the list of those objects which are ready/readable.
'''
if timeout is not None:
if timeout <= 0:
return _poll(object_list, 0)
else:
deadline = monotonic() + timeout
while True:
try:
return _poll(object_list, timeout)
except (OSError, IOError, socket.error) as e: # pragma: no cover
if e.errno != errno.EINTR:
raise
if timeout is not None:
timeout = deadline - monotonic()
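# A minimal usage sketch (illustrative only): wait for a pipe read-end to
# become readable, with a one second timeout.
#
#   >>> import os
#   >>> r, w = os.pipe()
#   >>> _ = os.write(w, b'x')
#   >>> wait([r], timeout=1) == [r]
#   True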

View file

@ -0,0 +1,99 @@
###############################################################################
# Extra reducers for Windows system and connections objects
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Add adapted reduction for LokyProcesses and socket/PipeConnection
#
import os
import sys
import socket
from .reduction import register
if sys.platform == 'win32':
if sys.version_info[:2] < (3, 3):
from _multiprocessing import PipeConnection
else:
import _winapi
from multiprocessing.connection import PipeConnection
if sys.version_info[:2] >= (3, 4) and sys.platform == 'win32':
class DupHandle(object):
def __init__(self, handle, access, pid=None):
# duplicate handle for process with given pid
if pid is None:
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
return self._handle
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
def reduce_pipe_connection(conn):
access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
(_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
dh = DupHandle(conn.fileno(), access)
return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
def rebuild_pipe_connection(dh, readable, writable):
from multiprocessing.connection import PipeConnection
handle = dh.detach()
return PipeConnection(handle, readable, writable)
register(PipeConnection, reduce_pipe_connection)
elif sys.platform == 'win32':
# Older Python versions
from multiprocessing.reduction import reduce_pipe_connection
register(PipeConnection, reduce_pipe_connection)
if sys.version_info[:2] < (3, 3) and sys.platform == 'win32':
from _multiprocessing import win32
from multiprocessing.reduction import reduce_handle, rebuild_handle
close = win32.CloseHandle
def fromfd(handle, family, type_, proto=0):
s = socket.socket(family, type_, proto, fileno=handle)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
if not hasattr(socket, "fromfd"):
raise TypeError("sockets cannot be pickled on this system.")
reduced_handle = reduce_handle(s.fileno())
return _rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def _rebuild_socket(reduced_handle, family, type_, proto):
handle = rebuild_handle(reduced_handle)
s = fromfd(handle, family, type_, proto)
close(handle)
return s
register(socket.socket, reduce_socket)
elif sys.version_info[:2] < (3, 4):
from multiprocessing.reduction import reduce_socket
register(socket.socket, reduce_socket)
else:
from multiprocessing.reduction import _reduce_socket
register(socket.socket, _reduce_socket)

View file

@ -0,0 +1,58 @@
###############################################################################
# Compat for wait function on Windows system
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/connection.py (17/02/2017)
# * Backport wait function to python2.7
#
import ctypes
import sys
from time import sleep
if sys.platform == 'win32' and sys.version_info[:2] < (3, 3):
from _subprocess import WaitForSingleObject, WAIT_OBJECT_0
try:
from time import monotonic
except ImportError:
    # Backward compat for old Python versions that did not ship a
    # cross-platform monotonic clock by default.
# TODO: do we want to add support for cygwin at some point? See:
# https://github.com/atdt/monotonic/blob/master/monotonic.py
GetTickCount64 = ctypes.windll.kernel32.GetTickCount64
GetTickCount64.restype = ctypes.c_ulonglong
def monotonic():
"""Monotonic clock, cannot go backward."""
return GetTickCount64() / 1000.0
def wait(handles, timeout=None):
"""Backward compat for python2.7
    This function waits for either:
* one connection is ready for read,
* one process handle has exited or got killed,
* timeout is reached. Note that this function has a precision of 2
msec.
"""
if timeout is not None:
deadline = monotonic() + timeout
while True:
        # We cannot use select as on Windows it only supports sockets
ready = []
for h in handles:
if type(h) in [int, long]:
if WaitForSingleObject(h, 0) == WAIT_OBJECT_0:
ready += [h]
elif h.poll(0):
ready.append(h)
if len(ready) > 0:
return ready
sleep(.001)
if timeout is not None and deadline - monotonic() <= 0:
return []

View file

@ -0,0 +1,41 @@
###############################################################################
# Compat file to import the correct modules for each platform and python
# version.
#
# author: Thomas Moreau and Olivier grisel
#
import sys
PY3 = sys.version_info[:2] >= (3, 3)
if PY3:
import queue
else:
import Queue as queue
if sys.version_info >= (3, 4):
from multiprocessing.process import BaseProcess
else:
from multiprocessing.process import Process as BaseProcess
# Platform specific compat
if sys.platform == "win32":
from .compat_win32 import wait
else:
from .compat_posix import wait
def set_cause(exc, cause):
exc.__cause__ = cause
if not PY3:
# Preformat message here.
if exc.__cause__ is not None:
exc.args = ("{}\n\nThis was caused directly by {}".format(
exc.args if len(exc.args) != 1 else exc.args[0],
str(exc.__cause__)),)
return exc
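# A minimal usage sketch (illustrative only): chain an exception to its
# cause in a way that also works on Python 2, where the message is
# preformatted instead of relying on __cause__ rendering.
#
#   >>> try:
#   ...     raise ValueError('original')
#   ... except ValueError as e:
#   ...     exc = set_cause(RuntimeError('wrapper'), e)
#   >>> exc.__cause__   # doctest: +ELLIPSIS
#   ValueError(...)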
__all__ = ["queue", "BaseProcess", "set_cause", "wait"]

View file

@ -0,0 +1,13 @@
# flake8: noqa
###############################################################################
# Compat file to load the correct wait function
#
# author: Thomas Moreau and Olivier grisel
#
import sys
# Compat wait
if sys.version_info < (3, 3):
from ._posix_wait import wait
else:
from multiprocessing.connection import wait

View file

@ -0,0 +1,46 @@
# flake8: noqa: F401
import sys
import numbers
if sys.platform == "win32":
# Avoid import error by code introspection tools such as test runners
# trying to import this module while running on non-Windows systems.
# Compat Popen
if sys.version_info[:2] >= (3, 4):
from multiprocessing.popen_spawn_win32 import Popen
else:
from multiprocessing.forking import Popen
# wait compat
if sys.version_info[:2] < (3, 3):
from ._win_wait import wait
else:
from multiprocessing.connection import wait
# Compat _winapi
if sys.version_info[:2] >= (3, 4):
import _winapi
else:
import os
import msvcrt
if sys.version_info[:2] < (3, 3):
import _subprocess as win_api
from _multiprocessing import win32
else:
import _winapi as win_api
class _winapi:
CreateProcess = win_api.CreateProcess
@staticmethod
def CloseHandle(h):
if isinstance(h, numbers.Integral):
# Cast long to int for 64-bit Python 2.7 under Windows
h = int(h)
if sys.version_info[:2] < (3, 3):
if not isinstance(h, int):
h = h.Detach()
win32.CloseHandle(h)
else:
win_api.CloseHandle(h)

View file

@ -0,0 +1,367 @@
###############################################################################
# Basic context management with LokyContext, providing compat
# for UNIX 2.7 and 3.3
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/context.py
# * Create a context ensuring loky uses only objects that are compatible
# * Add LokyContext to the list of context of multiprocessing so loky can be
# used with multiprocessing.set_start_method
# * Add some compat functions for python2.7 and 3.3.
#
from __future__ import division
import os
import sys
import subprocess
import traceback
import warnings
import multiprocessing as mp
from .process import LokyProcess, LokyInitMainProcess
START_METHODS = ['loky', 'loky_init_main']
_DEFAULT_START_METHOD = None
# Cache for the number of physical cores to avoid repeating subprocess calls.
# It should not change during the lifetime of the program.
physical_cores_cache = None
if sys.version_info[:2] >= (3, 4):
from multiprocessing import get_context as mp_get_context
from multiprocessing.context import assert_spawning, set_spawning_popen
from multiprocessing.context import get_spawning_popen, BaseContext
START_METHODS += ['spawn']
if sys.platform != 'win32':
START_METHODS += ['fork', 'forkserver']
def get_context(method=None):
# Try to overload the default context
method = method or _DEFAULT_START_METHOD or "loky"
if method == "fork":
# If 'fork' is explicitly requested, warn user about potential
# issues.
warnings.warn("`fork` start method should not be used with "
"`loky` as it does not respect POSIX. Try using "
"`spawn` or `loky` instead.", UserWarning)
try:
context = mp_get_context(method)
except ValueError:
raise ValueError("Unknown context '{}'. Value should be in {}."
.format(method, START_METHODS))
return context
else:
if sys.platform != 'win32':
import threading
# Mechanism to check that the current thread is spawning a process
_tls = threading.local()
popen_attr = 'spawning_popen'
else:
from multiprocessing.forking import Popen
_tls = Popen._tls
popen_attr = 'process_handle'
BaseContext = object
def get_spawning_popen():
return getattr(_tls, popen_attr, None)
def set_spawning_popen(popen):
setattr(_tls, popen_attr, popen)
def assert_spawning(obj):
if get_spawning_popen() is None:
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(obj).__name__
)
def get_context(method=None):
method = method or _DEFAULT_START_METHOD or 'loky'
if method == "loky":
return LokyContext()
elif method == "loky_init_main":
return LokyInitMainContext()
else:
raise ValueError("Unknown context '{}'. Value should be in {}."
.format(method, START_METHODS))
def set_start_method(method, force=False):
global _DEFAULT_START_METHOD
if _DEFAULT_START_METHOD is not None and not force:
raise RuntimeError('context has already been set')
assert method is None or method in START_METHODS, (
"'{}' is not a valid start_method. It should be in {}"
.format(method, START_METHODS))
_DEFAULT_START_METHOD = method
def get_start_method():
return _DEFAULT_START_METHOD
def cpu_count(only_physical_cores=False):
"""Return the number of CPUs the current process can use.
The returned number of CPUs accounts for:
* the number of CPUs in the system, as given by
``multiprocessing.cpu_count``;
* the CPU affinity settings of the current process
(available with Python 3.4+ on some Unix systems);
* CFS scheduler CPU bandwidth limit (available on Linux only, typically
set by docker and similar container orchestration systems);
* the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
and is given as the minimum of these constraints.
If ``only_physical_cores`` is True, return the number of physical cores
instead of the number of logical cores (hyperthreading / SMT). Note that
this option is not enforced if the number of usable cores is controlled in
any other way such as: process affinity, restricting CFS scheduler policy
or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
cores is not found, return the number of logical cores.
    It is also always greater than or equal to 1.
"""
# TODO: use os.cpu_count when dropping python 2 support
try:
cpu_count_mp = mp.cpu_count()
except NotImplementedError:
cpu_count_mp = 1
cpu_count_user = _cpu_count_user(cpu_count_mp)
aggregate_cpu_count = min(cpu_count_mp, cpu_count_user)
if only_physical_cores:
cpu_count_physical, exception = _count_physical_cores()
if cpu_count_user < cpu_count_mp:
# Respect user setting
cpu_count = max(cpu_count_user, 1)
elif cpu_count_physical == "not found":
# Fallback to default behavior
if exception is not None:
# warns only the first time
warnings.warn(
"Could not find the number of physical cores for the "
"following reason:\n" + str(exception) + "\n"
"Returning the number of logical cores instead. You can "
"silence this warning by setting LOKY_MAX_CPU_COUNT to "
"the number of cores you want to use.")
if sys.version_info >= (3, 5):
# TODO remove the version check when dropping py2 support
traceback.print_tb(exception.__traceback__)
cpu_count = max(aggregate_cpu_count, 1)
else:
return cpu_count_physical
else:
cpu_count = max(aggregate_cpu_count, 1)
return cpu_count
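# A minimal usage sketch (illustrative only): the LOKY_MAX_CPU_COUNT
# environment variable acts as a soft upper bound on the value returned
# by cpu_count().
#
#   >>> os.environ['LOKY_MAX_CPU_COUNT'] = '2'
#   >>> cpu_count() <= 2
#   True
#   >>> del os.environ['LOKY_MAX_CPU_COUNT']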
def _cpu_count_user(cpu_count_mp):
"""Number of user defined available CPUs"""
import math
# Number of available CPUs given affinity settings
cpu_count_affinity = cpu_count_mp
if hasattr(os, 'sched_getaffinity'):
try:
cpu_count_affinity = len(os.sched_getaffinity(0))
except NotImplementedError:
pass
# CFS scheduler CPU bandwidth limit
# available in Linux since 2.6 kernel
cpu_count_cfs = cpu_count_mp
cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
if os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
with open(cfs_quota_fname, 'r') as fh:
cfs_quota_us = int(fh.read())
with open(cfs_period_fname, 'r') as fh:
cfs_period_us = int(fh.read())
if cfs_quota_us > 0 and cfs_period_us > 0:
# Make sure this quantity is an int as math.ceil returns a
# float in python2.7. (See issue #165)
cpu_count_cfs = int(math.ceil(cfs_quota_us / cfs_period_us))
# User defined soft-limit passed as a loky specific environment variable.
cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', cpu_count_mp))
return min(cpu_count_affinity, cpu_count_cfs, cpu_count_loky)
def _count_physical_cores():
"""Return a tuple (number of physical cores, exception)
If the number of physical cores is found, exception is set to None.
If it has not been found, return ("not found", exception).
The number of physical cores is cached to avoid repeating subprocess calls.
"""
exception = None
# First check if the value is cached
global physical_cores_cache
if physical_cores_cache is not None:
return physical_cores_cache, exception
# Not cached yet, find it
try:
if sys.platform == "linux":
cpu_info = subprocess.run(
"lscpu --parse=core".split(" "), capture_output=True)
cpu_info = cpu_info.stdout.decode("utf-8").splitlines()
cpu_info = {line for line in cpu_info if not line.startswith("#")}
cpu_count_physical = len(cpu_info)
elif sys.platform == "win32":
cpu_info = subprocess.run(
"wmic CPU Get NumberOfCores /Format:csv".split(" "),
capture_output=True)
cpu_info = cpu_info.stdout.decode('utf-8').splitlines()
cpu_info = [l.split(",")[1] for l in cpu_info
if (l and l != "Node,NumberOfCores")]
cpu_count_physical = sum(map(int, cpu_info))
elif sys.platform == "darwin":
cpu_info = subprocess.run(
"sysctl -n hw.physicalcpu".split(" "), capture_output=True)
cpu_info = cpu_info.stdout.decode('utf-8')
cpu_count_physical = int(cpu_info)
else:
raise NotImplementedError(
"unsupported platform: {}".format(sys.platform))
# if cpu_count_physical < 1, we did not find a valid value
if cpu_count_physical < 1:
raise ValueError(
"found {} physical cores < 1".format(cpu_count_physical))
except Exception as e:
exception = e
cpu_count_physical = "not found"
# Put the result in cache
physical_cores_cache = cpu_count_physical
return cpu_count_physical, exception
class LokyContext(BaseContext):
"""Context relying on the LokyProcess."""
_name = 'loky'
Process = LokyProcess
cpu_count = staticmethod(cpu_count)
def Queue(self, maxsize=0, reducers=None):
'''Returns a queue object'''
from .queues import Queue
return Queue(maxsize, reducers=reducers,
ctx=self.get_context())
def SimpleQueue(self, reducers=None):
'''Returns a queue object'''
from .queues import SimpleQueue
return SimpleQueue(reducers=reducers, ctx=self.get_context())
if sys.version_info[:2] < (3, 4):
"""Compat for python2.7/3.3 for necessary methods in Context"""
def get_context(self):
return self
def get_start_method(self):
return self._name
def Pipe(self, duplex=True):
'''Returns two connection object connected by a pipe'''
return mp.Pipe(duplex)
if sys.platform != "win32":
"""Use the compat Manager for python2.7/3.3 on UNIX to avoid
relying on fork processes
"""
def Manager(self):
"""Returns a manager object"""
from .managers import LokyManager
m = LokyManager()
m.start()
return m
else:
"""Compat for context on Windows and python2.7/3.3. Using regular
multiprocessing objects as it does not rely on fork.
"""
from multiprocessing import synchronize
Semaphore = staticmethod(synchronize.Semaphore)
BoundedSemaphore = staticmethod(synchronize.BoundedSemaphore)
Lock = staticmethod(synchronize.Lock)
RLock = staticmethod(synchronize.RLock)
Condition = staticmethod(synchronize.Condition)
Event = staticmethod(synchronize.Event)
Manager = staticmethod(mp.Manager)
if sys.platform != "win32":
"""For Unix platform, use our custom implementation of synchronize
relying on ctypes to interface with pthread semaphores.
"""
def Semaphore(self, value=1):
"""Returns a semaphore object"""
from .synchronize import Semaphore
return Semaphore(value=value)
def BoundedSemaphore(self, value):
"""Returns a bounded semaphore object"""
from .synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Lock(self):
"""Returns a lock object"""
from .synchronize import Lock
return Lock()
def RLock(self):
"""Returns a recurrent lock object"""
from .synchronize import RLock
return RLock()
def Condition(self, lock=None):
"""Returns a condition object"""
from .synchronize import Condition
return Condition(lock)
def Event(self):
"""Returns an event object"""
from .synchronize import Event
return Event()
class LokyInitMainContext(LokyContext):
"""Extra context with LokyProcess, which does load the main module
This context is used for compatibility in the case ``cloudpickle`` is not
    present on the running system. This permits loading functions defined in
    the ``main`` module, using proper safeguards. The declaration of the
    ``executor`` should be protected by ``if __name__ == "__main__":`` and the
    functions and variables used from main should be outside this block.
This mimics the default behavior of multiprocessing under Windows and the
behavior of the ``spawn`` start method on a posix system for python3.4+.
For more details, see the end of the following section of python doc
https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
"""
_name = 'loky_init_main'
Process = LokyInitMainProcess
if sys.version_info > (3, 4):
"""Register loky context so it works with multiprocessing.get_context"""
ctx_loky = LokyContext()
mp.context._concrete_contexts['loky'] = ctx_loky
mp.context._concrete_contexts['loky_init_main'] = LokyInitMainContext()
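# A minimal usage sketch (illustrative only): once registered above, the
# loky context is reachable through the standard multiprocessing API
# (python3.4+ only).
#
#   >>> ctx = mp.get_context('loky')
#   >>> ctx.get_start_method()
#   'loky'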

View file

@ -0,0 +1,48 @@
###############################################################################
# Launch a subprocess using forkexec and make sure only the needed fd are
# shared in the two process.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
if sys.platform == "darwin" and sys.version_info < (3, 3):
FileNotFoundError = OSError
def close_fds(keep_fds): # pragma: no cover
"""Close all the file descriptors except those in keep_fds."""
# Make sure to keep stdout and stderr open for logging purpose
keep_fds = set(keep_fds).union([1, 2])
# We try to retrieve all the open fds
try:
open_fds = set(int(fd) for fd in os.listdir('/proc/self/fd'))
except FileNotFoundError:
import resource
max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
open_fds = set(fd for fd in range(3, max_nfds))
open_fds.add(0)
for i in open_fds - keep_fds:
try:
os.close(i)
except OSError:
pass
def fork_exec(cmd, keep_fds, env=None):
# copy the environment variables to set in the child process
env = {} if env is None else env
child_env = os.environ.copy()
child_env.update(env)
pid = os.fork()
if pid == 0: # pragma: no cover
close_fds(keep_fds)
os.execve(sys.executable, cmd, child_env)
else:
return pid

View file

@ -0,0 +1,51 @@
###############################################################################
# compat for UNIX 2.7 and 3.3
# Manager with LokyContext server.
# This avoids having a Manager that uses fork and breaks the fds.
#
# author: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/managers.py (17/02/2017)
# * Overload the start method to use LokyContext and launch a loky subprocess
#
import multiprocessing as mp
from multiprocessing.managers import SyncManager, State
from .process import LokyProcess as Process
class LokyManager(SyncManager):
def start(self, initializer=None, initargs=()):
'''Spawn a server process for this manager object'''
assert self._state.value == State.INITIAL
if (initializer is not None
and not hasattr(initializer, '__call__')):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = mp.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, bytes(self._authkey),
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = mp.util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)

View file

@ -0,0 +1,215 @@
###############################################################################
# Popen for LokyProcess.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
import signal
import pickle
from io import BytesIO
from . import reduction, spawn
from .context import get_spawning_popen, set_spawning_popen
from multiprocessing import util, process
if sys.version_info[:2] < (3, 3):
ProcessLookupError = OSError
if sys.platform != "win32":
from . import resource_tracker
__all__ = []
if sys.platform != "win32":
#
# Wrapper for an fd used while launching a process
#
class _DupFd(object):
def __init__(self, fd):
self.fd = reduction._mk_inheritable(fd)
def detach(self):
return self.fd
#
# Start child process using subprocess.Popen
#
__all__.append('Popen')
class Popen(object):
method = 'loky'
DupFd = _DupFd
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self._fds = []
self._launch(process_obj)
if sys.version_info < (3, 4):
@classmethod
def duplicate_for_child(cls, fd):
popen = get_spawning_popen()
popen._fds.append(fd)
return reduction._mk_inheritable(fd)
else:
def duplicate_for_child(self, fd):
self._fds.append(fd)
return reduction._mk_inheritable(fd)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except OSError:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if sys.version_info < (3, 3):
import time
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
if self.returncode is None:
if timeout is not None:
from multiprocessing.connection import wait
if not wait([self.sentinel], timeout):
return None
# This shouldn't block if wait() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except ProcessLookupError:
pass
except OSError:
if self.wait(timeout=0.1) is None:
raise
def _launch(self, process_obj):
tracker_fd = resource_tracker._resource_tracker.getfd()
fp = BytesIO()
set_spawning_popen(self)
try:
prep_data = spawn.get_preparation_data(
process_obj._name,
getattr(process_obj, "init_main_module", True))
reduction.dump(prep_data, fp)
reduction.dump(process_obj, fp)
finally:
set_spawning_popen(None)
try:
parent_r, child_w = os.pipe()
child_r, parent_w = os.pipe()
# for fd in self._fds:
# _mk_inheritable(fd)
cmd_python = [sys.executable]
cmd_python += ['-m', self.__module__]
cmd_python += ['--process-name', str(process_obj.name)]
cmd_python += ['--pipe',
str(reduction._mk_inheritable(child_r))]
reduction._mk_inheritable(child_w)
reduction._mk_inheritable(tracker_fd)
self._fds.extend([child_r, child_w, tracker_fd])
if sys.version_info >= (3, 8) and os.name == 'posix':
mp_tracker_fd = prep_data['mp_tracker_args']['fd']
self.duplicate_for_child(mp_tracker_fd)
from .fork_exec import fork_exec
pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
util.debug("launched python with pid {} and cmd:\n{}"
.format(pid, cmd_python))
self.sentinel = parent_r
method = 'getbuffer'
if not hasattr(fp, method):
method = 'getvalue'
with os.fdopen(parent_w, 'wb') as f:
f.write(getattr(fp, method)())
self.pid = pid
finally:
if parent_r is not None:
util.Finalize(self, os.close, (parent_r,))
for fd in (child_r, child_w):
if fd is not None:
os.close(fd)
@staticmethod
def thread_is_spawning():
return True
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('Command line parser')
parser.add_argument('--pipe', type=int, required=True,
help='File handle for the pipe')
parser.add_argument('--process-name', type=str, default=None,
help='Identifier for debugging purpose')
args = parser.parse_args()
info = dict()
exitcode = 1
try:
with os.fdopen(args.pipe, 'rb') as from_parent:
process.current_process()._inheriting = True
try:
prep_data = pickle.load(from_parent)
spawn.prepare(prep_data)
process_obj = pickle.load(from_parent)
finally:
del process.current_process()._inheriting
exitcode = process_obj._bootstrap()
except Exception:
print('\n\n' + '-' * 80)
print('{} failed with traceback: '.format(args.process_name))
print('-' * 80)
import traceback
print(traceback.format_exc())
print('\n' + '-' * 80)
finally:
if from_parent is not None:
from_parent.close()
sys.exit(exitcode)

View file

@ -0,0 +1,173 @@
import os
import sys
from pickle import load
from multiprocessing import process, util
from . import spawn
from . import reduction
from .context import get_spawning_popen, set_spawning_popen
if sys.platform == "win32":
# Avoid import error by code introspection tools such as test runners
# trying to import this module while running on non-Windows systems.
import msvcrt
from .compat_win32 import _winapi
from .compat_win32 import Popen as _Popen
from .reduction import duplicate
else:
_Popen = object
if sys.version_info[:2] < (3, 3):
from os import fdopen as open
__all__ = ['Popen']
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
def _path_eq(p1, p2):
return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
WINENV = (hasattr(sys, "_base_executable")
and not _path_eq(sys.executable, sys._base_executable))
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(_Popen):
'''
Start a subprocess to run the code of a process object
'''
method = 'loky'
def __init__(self, process_obj):
prep_data = spawn.get_preparation_data(
process_obj._name, getattr(process_obj, "init_main_module", True))
# read end of pipe will be "stolen" by the child process
# -- see spawn_main() in spawn.py.
rfd, wfd = os.pipe()
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
cmd = ' '.join('"%s"' % x for x in cmd)
python_exe = spawn.get_executable()
# copy the environment variables to set in the child process
child_env = os.environ.copy()
child_env.update(process_obj.env)
# bpo-35797: When running in a venv, we bypass the redirect
# executor and launch our base Python.
if WINENV and _path_eq(python_exe, sys.executable):
python_exe = sys._base_executable
child_env["__PYVENV_LAUNCHER__"] = sys.executable
try:
with open(wfd, 'wb') as to_child:
# start process
try:
                    # This flag allows passing inheritable handles from the
# parent to the child process in a python2-3 compatible way
# (see
# https://github.com/tomMoral/loky/pull/204#discussion_r290719629
# for more detail). When support for Python 2 is dropped,
# the cleaner multiprocessing.reduction.steal_handle should
# be used instead.
inherit = True
hp, ht, pid, tid = _winapi.CreateProcess(
python_exe, cmd,
None, None, inherit, 0,
child_env, None, None)
_winapi.CloseHandle(ht)
except BaseException:
_winapi.CloseHandle(rhandle)
raise
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
self.sentinel = int(hp)
util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
# send information to child
set_spawning_popen(self)
if sys.version_info[:2] < (3, 4):
Popen._tls.process_handle = int(hp)
try:
reduction.dump(prep_data, to_child)
reduction.dump(process_obj, to_child)
finally:
set_spawning_popen(None)
if sys.version_info[:2] < (3, 4):
del Popen._tls.process_handle
except IOError as exc:
# IOError 22 happens when the launched subprocess terminated before
# wfd.close is called. Thus we can safely ignore it.
if exc.errno != 22:
raise
util.debug("While starting {}, ignored a IOError 22"
.format(process_obj._name))
def duplicate_for_child(self, handle):
assert self is get_spawning_popen()
return duplicate(handle, self.sentinel)
def get_command_line(pipe_handle, **kwds):
'''
Returns prefix of command line used for spawning a child process
'''
if getattr(sys, 'frozen', False):
return ([sys.executable, '--multiprocessing-fork', pipe_handle])
else:
prog = 'from joblib.externals.loky.backend.popen_loky_win32 import main; main()'
opts = util._args_from_interpreter_flags()
return [spawn.get_executable()] + opts + [
'-c', prog, '--multiprocessing-fork', pipe_handle]
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False
def main():
'''
Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
spawn.prepare(preparation_data)
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)

View file

@ -0,0 +1,108 @@
###############################################################################
# LokyProcess implementation
#
# authors: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/process.py (17/02/2017)
# * Add some compatibility functions for python2.7 and 3.3
#
import os
import sys
from .compat import BaseProcess
class LokyProcess(BaseProcess):
_start_method = 'loky'
def __init__(self, group=None, target=None, name=None, args=(),
kwargs={}, daemon=None, init_main_module=False,
env=None):
if sys.version_info < (3, 3):
super(LokyProcess, self).__init__(
group=group, target=target, name=name, args=args,
kwargs=kwargs)
self.daemon = daemon
else:
super(LokyProcess, self).__init__(
group=group, target=target, name=name, args=args,
kwargs=kwargs, daemon=daemon)
self.env = {} if env is None else env
self.authkey = self.authkey
self.init_main_module = init_main_module
@staticmethod
def _Popen(process_obj):
if sys.platform == "win32":
from .popen_loky_win32 import Popen
else:
from .popen_loky_posix import Popen
return Popen(process_obj)
if sys.version_info < (3, 3):
def start(self):
'''
Start child process
'''
from multiprocessing.process import _current_process, _cleanup
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
_cleanup()
self._popen = self._Popen(self)
self._sentinel = self._popen.sentinel
_current_process._children.add(self)
@property
def sentinel(self):
'''
Return a file descriptor (Unix) or handle (Windows) suitable for
waiting for process termination.
'''
try:
return self._sentinel
except AttributeError:
raise ValueError("process not started")
if sys.version_info < (3, 4):
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationKey(authkey)
def _bootstrap(self):
from .context import set_start_method
set_start_method(self._start_method)
super(LokyProcess, self)._bootstrap()
class LokyInitMainProcess(LokyProcess):
_start_method = 'loky_init_main'
def __init__(self, group=None, target=None, name=None, args=(),
kwargs={}, daemon=None):
super(LokyInitMainProcess, self).__init__(
group=group, target=target, name=name, args=args, kwargs=kwargs,
daemon=daemon, init_main_module=True)
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationKey(bytes):
def __reduce__(self):
from .context import assert_spawning
try:
assert_spawning(self)
except RuntimeError:
raise TypeError(
'Pickling an AuthenticationKey object is '
'disallowed for security reasons'
)
return AuthenticationKey, (bytes(self),)

View file

@ -0,0 +1,247 @@
###############################################################################
# Queue and SimpleQueue implementation for loky
#
# authors: Thomas Moreau, Olivier Grisel
#
# based on multiprocessing/queues.py (16/02/2017)
# * Add some compatibility functions for python2.7 and 3.3 and make sure
#   the right synchronization primitives are used.
# * Add some custom reducers for the Queues/SimpleQueue to tweak the
# pickling process. (overload Queue._feed/SimpleQueue.put)
#
import os
import sys
import errno
import weakref
import threading
from multiprocessing import util
from multiprocessing import connection
from multiprocessing.synchronize import SEM_VALUE_MAX
from multiprocessing.queues import Full
from multiprocessing.queues import _sentinel, Queue as mp_Queue
from multiprocessing.queues import SimpleQueue as mp_SimpleQueue
from .reduction import loads, dumps
from .context import assert_spawning, get_context
__all__ = ['Queue', 'SimpleQueue', 'Full']
class Queue(mp_Queue):
def __init__(self, maxsize=0, reducers=None, ctx=None):
if sys.version_info[:2] >= (3, 4):
super().__init__(maxsize=maxsize, ctx=ctx)
else:
if maxsize <= 0:
# Can raise ImportError (see issues #3770 and #23400)
maxsize = SEM_VALUE_MAX
if ctx is None:
ctx = get_context()
self._maxsize = maxsize
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
self._sem = ctx.BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False
self._after_fork()
if sys.platform != 'win32':
util.register_after_fork(self, Queue._after_fork)
self._reducers = reducers
# Use custom queue set/get state to be able to reduce the custom reducers
def __getstate__(self):
assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._reducers, self._rlock, self._wlock, self._sem,
self._opid)
def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._reducers, self._rlock, self._wlock, self._sem,
self._opid) = state
if sys.version_info >= (3, 9):
self._reset()
else:
self._after_fork()
# Overload _start_thread to correctly call our custom _feed
def _start_thread(self):
util.debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send_bytes,
self._wlock, self._writer.close, self._reducers,
self._ignore_epipe, self._on_queue_feeder_error, self._sem),
name='QueueFeederThread'
)
self._thread.daemon = True
util.debug('doing self._thread.start()')
self._thread.start()
util.debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = util.Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
# Send sentinel to the thread queue object when garbage collected
self._close = util.Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
# Overload the _feed methods to use our custom pickling strategy.
@staticmethod
def _feed(buffer, notempty, send_bytes, writelock, close, reducers,
ignore_epipe, onerror, queue_sem):
util.debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
while 1:
try:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
util.debug('feeder thread got sentinel -- exiting')
close()
return
# serialize the data before acquiring the lock
obj_ = dumps(obj, reducers=reducers)
if wacquire is None:
send_bytes(obj_)
else:
wacquire()
try:
send_bytes(obj_)
finally:
wrelease()
# Remove references early to avoid leaking memory
del obj, obj_
except IndexError:
pass
except BaseException as e:
if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
if util.is_exiting():
util.info('error in queue thread: %s', e)
return
else:
queue_sem.release()
onerror(e, obj)
def _on_queue_feeder_error(self, e, obj):
"""
Private API hook called when feeding data in the background thread
raises an exception. For overriding by concurrent.futures.
"""
import traceback
traceback.print_exc()
if sys.version_info[:2] < (3, 4):
# Compat for python2.7/3.3 that use _send instead of _send_bytes
def _after_fork(self):
super(Queue, self)._after_fork()
self._send_bytes = self._writer.send_bytes
class SimpleQueue(mp_SimpleQueue):
def __init__(self, reducers=None, ctx=None):
if sys.version_info[:2] >= (3, 4):
super().__init__(ctx=ctx)
else:
# Use the context to create the sync objects for python2.7/3.3
if ctx is None:
ctx = get_context()
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._poll = self._reader.poll
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
            # Add possibility to use custom reducers
self._reducers = reducers
def close(self):
self._reader.close()
self._writer.close()
# Use custom queue set/get state to be able to reduce the custom reducers
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._reducers, self._rlock,
self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._reducers, self._rlock,
self._wlock) = state
if sys.version_info[:2] < (3, 4):
# For python2.7/3.3, overload get to avoid creating deadlocks with
# unpickling errors.
def get(self):
with self._rlock:
res = self._reader.recv_bytes()
# unserialize the data after having released the lock
return loads(res)
# Overload put to use our customizable reducer
def put(self, obj):
# serialize the data before acquiring the lock
obj = dumps(obj, reducers=self._reducers)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj)
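# A minimal usage sketch (illustrative only): a `reducers` mapping from a
# type to a reduce function customizes how that type is pickled on its way
# through the queue. `reduce_complex` below is hypothetical.
#
#   >>> def reduce_complex(c):
#   ...     return complex, (c.real, c.imag)
#   >>> q = SimpleQueue(reducers={complex: reduce_complex},
#   ...                 ctx=get_context())
#   >>> q.put(1 + 2j)
#   >>> q.get()
#   (1+2j)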

View file

@ -0,0 +1,280 @@
###############################################################################
# Customizable Pickler with some basic reducers
#
# author: Thomas Moreau
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Replace the ForkingPickler with a similar _LokyPickler,
# * Add CustomizableLokyPickler to allow customizing pickling process
# on the fly.
#
import io
import os
import sys
import functools
from multiprocessing import util
import types
try:
# Python 2 compat
from cPickle import loads as pickle_loads
except ImportError:
from pickle import loads as pickle_loads
import copyreg
from pickle import HIGHEST_PROTOCOL
if sys.platform == "win32":
if sys.version_info[:2] > (3, 3):
from multiprocessing.reduction import duplicate
else:
from multiprocessing.forking import duplicate
###############################################################################
# Enable custom pickling in Loky.
# To allow instance customization of the pickling process, we use 2 classes.
# _ReducerRegistry gives module-level customization and CustomizablePickler
# permits instance-based custom reducers. Only CustomizablePickler
# should be used.
class _ReducerRegistry(object):
"""Registry for custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
    to pickle ephemeral datastructures for inter-process communication,
    hence no backward compatibility is required.
"""
    # We override the pure Python pickler as it's the only way to be able to
# customize the dispatch table without side effects in Python 2.6
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
dispatch_table = {}
@classmethod
def register(cls, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table."""
if sys.version_info < (3,):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(cls, obj):
reduced = reduce_func(obj)
cls.save_reduce(obj=obj, *reduced)
cls.dispatch_table[type] = dispatcher
else:
cls.dispatch_table[type] = reduce_func
###############################################################################
# Register extra pickling routines to improve pickling support in loky
register = _ReducerRegistry.register
# make methods picklable
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
@classmethod
def h(cls):
pass
register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)
if not hasattr(sys, "pypy_version_info"):
# PyPy uses functions instead of method_descriptors and wrapper_descriptors
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
# Make partial functions picklable
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
if sys.platform != "win32":
from ._posix_reduction import _mk_inheritable # noqa: F401
else:
from . import _win_reduction # noqa: F401
# global variable to change the pickler behavior
try:
from joblib.externals import cloudpickle # noqa: F401
DEFAULT_ENV = "cloudpickle"
except ImportError:
# If cloudpickle is not present, fallback to pickle
DEFAULT_ENV = "pickle"
ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
_LokyPickler = None
_loky_pickler_name = None
def set_loky_pickler(loky_pickler=None):
global _LokyPickler, _loky_pickler_name
if loky_pickler is None:
loky_pickler = ENV_LOKY_PICKLER
loky_pickler_cls = None
# The default loky_pickler is cloudpickle
if loky_pickler in ["", None]:
loky_pickler = "cloudpickle"
if loky_pickler == _loky_pickler_name:
return
if loky_pickler == "cloudpickle":
from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
else:
try:
from importlib import import_module
module_pickle = import_module(loky_pickler)
loky_pickler_cls = module_pickle.Pickler
except (ImportError, AttributeError) as e:
extra_info = ("\nThis error occurred while setting loky_pickler to"
" '{}', as required by the env variable LOKY_PICKLER"
" or the function set_loky_pickler."
.format(loky_pickler))
e.args = (e.args[0] + extra_info,) + e.args[1:]
e.msg = e.args[0]
raise e
util.debug("Using '{}' for serialization."
.format(loky_pickler if loky_pickler else "cloudpickle"))
class CustomizablePickler(loky_pickler_cls):
_loky_pickler_cls = loky_pickler_cls
def _set_dispatch_table(self, dispatch_table):
for ancestor_class in self._loky_pickler_cls.mro():
dt_attribute = getattr(ancestor_class, "dispatch_table", None)
if isinstance(dt_attribute, types.MemberDescriptorType):
# Ancestor class (typically _pickle.Pickler) has a
# member_descriptor for its "dispatch_table" attribute. Use
# it to set the dispatch_table as a member instead of a
# dynamic attribute in the __dict__ of the instance,
# otherwise it will not be taken into account by the C
# implementation of the dump method if a subclass defines a
# class-level dispatch_table attribute as was done in
# cloudpickle 1.6.0:
# https://github.com/joblib/loky/pull/260
dt_attribute.__set__(self, dispatch_table)
break
            # On top of the member descriptor set, also use setattr so that
            # code that directly accesses self.dispatch_table gets a consistent view
# of the same table.
self.dispatch_table = dispatch_table
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
loky_pickler_cls.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if sys.version_info < (3,):
self.dispatch = loky_pickler_cls.dispatch.copy()
self.dispatch.update(_ReducerRegistry.dispatch_table)
else:
if hasattr(self, "dispatch_table"):
                    # Force a copy that we will update without mutating
                    # any class-level dispatch_table.
loky_dt = dict(self.dispatch_table)
else:
# Use standard reducers as bases
loky_dt = copyreg.dispatch_table.copy()
# Register loky specific reducers
loky_dt.update(_ReducerRegistry.dispatch_table)
# Set the new dispatch table, taking care of the fact that we
# need to use the member_descriptor when we inherit from a
# subclass of the C implementation of the Pickler base class
                # with a class-level dispatch_table attribute.
self._set_dispatch_table(loky_dt)
# Register custom reducers
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
def register(self, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table.
"""
if sys.version_info < (3,):
# Python 2 pickler dispatching is not explicitly customizable.
                # Let us use a closure to work around this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
_LokyPickler = CustomizablePickler
_loky_pickler_name = loky_pickler
def get_loky_pickler_name():
global _loky_pickler_name
return _loky_pickler_name
def get_loky_pickler():
global _LokyPickler
return _LokyPickler
# Set it to its default value
set_loky_pickler()
def loads(buf):
    # Compat for Python 2.7
if sys.version_info < (3, 3) and isinstance(buf, io.BytesIO):
buf = buf.getvalue()
return pickle_loads(buf)
def dump(obj, file, reducers=None, protocol=None):
'''Replacement for pickle.dump() using _LokyPickler.'''
global _LokyPickler
_LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
def dumps(obj, reducers=None, protocol=None):
global _LokyPickler
buf = io.BytesIO()
dump(obj, buf, reducers=reducers, protocol=protocol)
if sys.version_info < (3, 3):
return buf.getvalue()
return buf.getbuffer()
__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
if sys.platform == "win32":
__all__ += ["duplicate"]

View file

@ -0,0 +1,380 @@
###############################################################################
# Server process to keep track of unlinked resources, like folders and
# semaphores and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
# * include custom spawnv_passfds to start the process
# * use custom unlink from our own SemLock implementation
# * add some VERBOSE logging
#
#
# On Unix we run a server process which keeps track of unlinked
# resources. The server ignores SIGINT and SIGTERM and reads from a
# pipe. The resource_tracker implements a reference counting scheme: each time
# a Python process anticipates the shared usage of a resource by another
# process, it signals the resource_tracker of this shared usage, and in return,
# the resource_tracker increments the resource's reference count by 1.
# Similarly, when access to a resource is closed by a Python process, the
# process notifies the resource_tracker by asking it to decrement the
# resource's reference count by 1. When the reference count drops to 0, the
# resource_tracker attempts to clean up the underlying resource.
# Finally, every other process connected to the resource tracker has a copy of
# the writable end of the pipe used to communicate with it, so the resource
# tracker gets EOF when all other processes have exited. Then the
# resource_tracker process unlinks any remaining leaked resources (with
# reference count above 0)
# For semaphores, this is important because the system only supports a limited
# number of named semaphores, and they will not be automatically removed till
# the next reboot. Without this resource tracker process, "killall python"
# would probably leave unlinked semaphores.
# Note that this behavior differs from CPython's resource_tracker, which only
# implements a list of shared resources, and not a proper refcounting scheme.
# Also, CPython's resource tracker will only attempt to clean up those shared
# resources once all processes connected to the resource tracker have exited.
import os
import shutil
import sys
import signal
import warnings
import threading
from . import spawn
from multiprocessing import util
if sys.platform == "win32":
from .compat_win32 import _winapi
from .reduction import duplicate
import msvcrt
try:
from _multiprocessing import sem_unlink
except ImportError:
from .semlock import sem_unlink
if sys.version_info < (3,):
BrokenPipeError = OSError
from os import fdopen as open
__all__ = ['ensure_running', 'register', 'unregister']
_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
_CLEANUP_FUNCS = {
'folder': shutil.rmtree,
'file': os.unlink
}
if os.name == "posix":
_CLEANUP_FUNCS['semlock'] = sem_unlink
VERBOSE = False
class ResourceTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
self._pid = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that resource tracker process is running.
This can be run from any process. Usually a child process will use
the resource created by its parent.'''
with self._lock:
if self._fd is not None:
# resource tracker was launched before, is it still running?
if self._check_alive():
# => still alive
return
# => dead, launch it again
os.close(self._fd)
if os.name == "posix":
try:
# At this point, the resource_tracker process has been
# killed or crashed. Let's remove the process entry
# from the process table to avoid zombie processes.
os.waitpid(self._pid, 0)
except OSError:
# The process was terminated or is a child from an
# ancestor of the current process.
pass
self._fd = None
self._pid = None
warnings.warn('resource_tracker: process died unexpectedly, '
                              'relaunching. Some folders/semaphores might '
'leak.')
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
r, w = os.pipe()
if sys.platform == "win32":
_r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
os.close(r)
r = _r
cmd = 'from {} import main; main({}, {})'.format(
main.__module__, r, VERBOSE)
try:
fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
                # In python 3.3, there is a bug which puts `-RRRRR..` instead of
# `-R` in args. Replace it to get the correct flags.
# See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488
if sys.version_info[:2] <= (3, 3):
import re
for i in range(1, len(args)):
args[i] = re.sub("-R+", "-R", args[i])
args += ['-c', cmd]
util.debug("launching resource tracker: {}".format(args))
# bpo-33613: Register a signal mask that will block the
# signals. This signal mask will be inherited by the child
# that is going to be spawned and will protect the child from a
# race condition that can make the child die before it
# registers signal handlers for SIGINT and SIGTERM. The mask is
# unregistered after spawning the child.
try:
if _HAVE_SIGMASK:
signal.pthread_sigmask(signal.SIG_BLOCK,
_IGNORED_SIGNALS)
pid = spawnv_passfds(exe, args, fds_to_pass)
finally:
if _HAVE_SIGMASK:
signal.pthread_sigmask(signal.SIG_UNBLOCK,
_IGNORED_SIGNALS)
except BaseException:
os.close(w)
raise
else:
self._fd = w
self._pid = pid
finally:
if sys.platform == "win32":
_winapi.CloseHandle(r)
else:
os.close(r)
def _check_alive(self):
'''Check for the existence of the resource tracker process.'''
try:
self._send('PROBE', '', '')
except BrokenPipeError:
return False
else:
return True
def register(self, name, rtype):
'''Register a named resource, and increment its refcount.'''
self.ensure_running()
self._send('REGISTER', name, rtype)
def unregister(self, name, rtype):
'''Unregister a named resource with resource tracker.'''
self.ensure_running()
self._send('UNREGISTER', name, rtype)
def maybe_unlink(self, name, rtype):
'''Decrement the refcount of a resource, and delete it if it hits 0'''
self.ensure_running()
self._send("MAYBE_UNLINK", name, rtype)
def _send(self, cmd, name, rtype):
msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd
def main(fd, verbose=0):
'''Run resource tracker.'''
# protect the process from ^C and "killall python" etc
if verbose:
util.log_to_stderr(level=util.DEBUG)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
if _HAVE_SIGMASK:
signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
if verbose:
util.debug("Main resource tracker is running")
registry = {rtype: dict() for rtype in _CLEANUP_FUNCS.keys()}
try:
# keep track of registered/unregistered resources
if sys.platform == "win32":
fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
with open(fd, 'rb') as f:
while True:
line = f.readline()
if line == b'': # EOF
break
try:
splitted = line.strip().decode('ascii').split(':')
# name can potentially contain separator symbols (for
# instance folders on Windows)
cmd, name, rtype = (
splitted[0], ':'.join(splitted[1:-1]), splitted[-1])
if cmd == 'PROBE':
continue
if rtype not in _CLEANUP_FUNCS:
raise ValueError(
'Cannot register {} for automatic cleanup: '
'unknown resource type ({}). Resource type should '
'be one of the following: {}'.format(
name, rtype, list(_CLEANUP_FUNCS.keys())))
if cmd == 'REGISTER':
if name not in registry[rtype]:
registry[rtype][name] = 1
else:
registry[rtype][name] += 1
if verbose:
util.debug(
"[ResourceTracker] incremented refcount of {} "
"{} (current {})".format(
rtype, name, registry[rtype][name]))
elif cmd == 'UNREGISTER':
del registry[rtype][name]
if verbose:
util.debug(
"[ResourceTracker] unregister {} {}: "
"registry({})".format(name, rtype, len(registry)))
elif cmd == 'MAYBE_UNLINK':
registry[rtype][name] -= 1
if verbose:
util.debug(
"[ResourceTracker] decremented refcount of {} "
"{} (current {})".format(
rtype, name, registry[rtype][name]))
if registry[rtype][name] == 0:
del registry[rtype][name]
try:
if verbose:
util.debug(
"[ResourceTracker] unlink {}"
.format(name))
_CLEANUP_FUNCS[rtype](name)
except Exception as e:
warnings.warn(
'resource_tracker: %s: %r' % (name, e))
else:
raise RuntimeError('unrecognized command %r' % cmd)
except BaseException:
try:
sys.excepthook(*sys.exc_info())
except BaseException:
pass
finally:
        # all processes have terminated; clean up any remaining resources
def _unlink_resources(rtype_registry, rtype):
if rtype_registry:
try:
warnings.warn('resource_tracker: There appear to be %d '
'leaked %s objects to clean up at shutdown' %
(len(rtype_registry), rtype))
except Exception:
pass
for name in rtype_registry:
# For some reason the process which created and registered this
# resource has failed to unregister it. Presumably it has
# died. We therefore clean it up.
try:
_CLEANUP_FUNCS[rtype](name)
if verbose:
util.debug("[ResourceTracker] unlink {}"
.format(name))
except Exception as e:
warnings.warn('resource_tracker: %s: %r' % (name, e))
for rtype, rtype_registry in registry.items():
if rtype == "folder":
continue
else:
_unlink_resources(rtype_registry, rtype)
        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker. To limit the risk of the resource tracker
        # attempting to delete a resource twice (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
if "folder" in registry:
_unlink_resources(registry["folder"], "folder")
if verbose:
util.debug("resource tracker shut down")
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
passfds = sorted(passfds)
if sys.platform != "win32":
errpipe_read, errpipe_write = os.pipe()
try:
from .reduction import _mk_inheritable
_pass = []
for fd in passfds:
_pass += [_mk_inheritable(fd)]
from .fork_exec import fork_exec
return fork_exec(args, _pass)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
else:
cmd = ' '.join('"%s"' % x for x in args)
        # Let CreateProcess errors propagate; swallowing them would leave
        # `pid` unbound in the return statement below.
        hp, ht, pid, tid = _winapi.CreateProcess(
            path, cmd, None, None, True, 0, None, None, None)
        _winapi.CloseHandle(ht)
return pid
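
A hedged sketch of the tracker's client API described in the header comment:
a process registers a shared resource once per expected user, and each user
calls maybe_unlink() when done; the resource is deleted when its refcount
drops to 0. The import path is an assumption:

import tempfile
from joblib.externals.loky.backend import resource_tracker

folder = tempfile.mkdtemp()
resource_tracker.register(folder, "folder")      # refcount -> 1
# ... hand `folder` over to another process, which registers it again ...
resource_tracker.maybe_unlink(folder, "folder")  # refcount -> 0, rmtree runs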

View file

@ -0,0 +1,274 @@
###############################################################################
# Ctypes implementation for posix semaphore.
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from cpython/Modules/_multiprocessing/semaphore.c (17/02/2017)
# * use ctypes to access pthread semaphores and provide a full python
# semaphore management.
# * For OSX, as sem_getvalue is not implemented, Semaphores with value > 1
#   are not guaranteed to work.
# * Only works with LokyProcess on posix
#
import os
import sys
import time
import errno
import ctypes
import tempfile
import threading
from ctypes.util import find_library
# As we need to use ctypes return types for the semlock object, the failure
# value needs to be cast to a proper python value. The Unix failure convention
# is to return 0, whereas OSX returns -1
SEM_FAILURE = ctypes.c_void_p(0).value
if sys.platform == 'darwin':
SEM_FAILURE = ctypes.c_void_p(-1).value
# Semaphore types
RECURSIVE_MUTEX = 0
SEMAPHORE = 1
# Semaphore constants
SEM_OFLAG = ctypes.c_int(os.O_CREAT | os.O_EXCL)
SEM_PERM = ctypes.c_int(384)
class timespec(ctypes.Structure):
_fields_ = [("tv_sec", ctypes.c_long), ("tv_nsec", ctypes.c_long)]
if sys.platform != 'win32':
pthread = ctypes.CDLL(find_library('pthread'), use_errno=True)
pthread.sem_open.restype = ctypes.c_void_p
pthread.sem_close.argtypes = [ctypes.c_void_p]
pthread.sem_wait.argtypes = [ctypes.c_void_p]
pthread.sem_trywait.argtypes = [ctypes.c_void_p]
pthread.sem_post.argtypes = [ctypes.c_void_p]
pthread.sem_getvalue.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
pthread.sem_unlink.argtypes = [ctypes.c_char_p]
if sys.platform != "darwin":
pthread.sem_timedwait.argtypes = [ctypes.c_void_p,
ctypes.POINTER(timespec)]
try:
from threading import get_ident
except ImportError:
def get_ident():
return threading.current_thread().ident
if sys.version_info[:2] < (3, 3):
class FileExistsError(OSError):
pass
class FileNotFoundError(OSError):
pass
def sem_unlink(name):
if pthread.sem_unlink(name.encode('ascii')) < 0:
raiseFromErrno()
def _sem_open(name, value=None):
""" Construct or retrieve a semaphore with the given name
If value is None, try to retrieve an existing named semaphore.
Else create a new semaphore with the given value
"""
if value is None:
handle = pthread.sem_open(ctypes.c_char_p(name), 0)
else:
handle = pthread.sem_open(ctypes.c_char_p(name), SEM_OFLAG, SEM_PERM,
ctypes.c_int(value))
if handle == SEM_FAILURE:
e = ctypes.get_errno()
if e == errno.EEXIST:
raise FileExistsError("a semaphore named %s already exists" % name)
elif e == errno.ENOENT:
raise FileNotFoundError('cannot find semaphore named %s' % name)
elif e == errno.ENOSYS:
raise NotImplementedError('No semaphore implementation on this '
'system')
else:
raiseFromErrno()
return handle
def _sem_timedwait(handle, timeout):
t_start = time.time()
if sys.platform != "darwin":
sec = int(timeout)
tv_sec = int(t_start)
nsec = int(1e9 * (timeout - sec) + .5)
tv_nsec = int(1e9 * (t_start - tv_sec) + .5)
deadline = timespec(sec+tv_sec, nsec+tv_nsec)
deadline.tv_sec += int(deadline.tv_nsec / 1000000000)
deadline.tv_nsec %= 1000000000
return pthread.sem_timedwait(handle, ctypes.pointer(deadline))
# PERFORMANCE WARNING
    # There is no sem_timedwait on OSX so we implement our own method. This
    # method can degrade performance, as the wait can have a latency of up
    # to 20 msecs
deadline = t_start + timeout
delay = 0
now = time.time()
while True:
# Poll the sem file
res = pthread.sem_trywait(handle)
if res == 0:
return 0
else:
e = ctypes.get_errno()
if e != errno.EAGAIN:
raiseFromErrno()
# check for timeout
now = time.time()
if now > deadline:
ctypes.set_errno(errno.ETIMEDOUT)
return -1
        # calculate how much time is left and check that the delay is not
        # too long -- maximum is 20 msecs
difference = (deadline - now)
delay = min(delay, 20e-3, difference)
# Sleep and increase delay
time.sleep(delay)
delay += 1e-3
class SemLock(object):
"""ctypes wrapper to the unix semaphore"""
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue, name=None, unlink_now=False):
self.count = 0
self.ident = 0
self.kind = kind
self.maxvalue = maxvalue
self.name = name
self.handle = _sem_open(self.name.encode('ascii'), value)
def __del__(self):
try:
res = pthread.sem_close(self.handle)
assert res == 0, "Issue while closing semaphores"
except AttributeError:
pass
def _is_mine(self):
return self.count > 0 and get_ident() == self.ident
def acquire(self, block=True, timeout=None):
if self.kind == RECURSIVE_MUTEX and self._is_mine():
self.count += 1
return True
if block and timeout is None:
res = pthread.sem_wait(self.handle)
elif not block or timeout <= 0:
res = pthread.sem_trywait(self.handle)
else:
res = _sem_timedwait(self.handle, timeout)
if res < 0:
e = ctypes.get_errno()
if e == errno.EINTR:
return None
elif e in [errno.EAGAIN, errno.ETIMEDOUT]:
return False
raiseFromErrno()
self.count += 1
self.ident = get_ident()
return True
def release(self):
if self.kind == RECURSIVE_MUTEX:
assert self._is_mine(), (
"attempt to release recursive lock not owned by thread")
if self.count > 1:
self.count -= 1
return
assert self.count == 1
else:
if sys.platform == 'darwin':
# Handle broken get_value for mac ==> only Lock will work
                # as sem_getvalue does not work properly
if self.maxvalue == 1:
if pthread.sem_trywait(self.handle) < 0:
e = ctypes.get_errno()
if e != errno.EAGAIN:
raise OSError(e, errno.errorcode[e])
else:
if pthread.sem_post(self.handle) < 0:
raiseFromErrno()
else:
raise ValueError(
"semaphore or lock released too many times")
else:
import warnings
warnings.warn("semaphore are broken on OSX, release might "
"increase its maximal value", RuntimeWarning)
else:
value = self._get_value()
if value >= self.maxvalue:
raise ValueError(
"semaphore or lock released too many times")
if pthread.sem_post(self.handle) < 0:
raiseFromErrno()
self.count -= 1
def _get_value(self):
value = ctypes.pointer(ctypes.c_int(-1))
if pthread.sem_getvalue(self.handle, value) < 0:
raiseFromErrno()
return value.contents.value
def _count(self):
return self.count
def _is_zero(self):
if sys.platform == 'darwin':
# Handle broken get_value for mac ==> only Lock will work
            # as sem_getvalue does not work properly
if pthread.sem_trywait(self.handle) < 0:
e = ctypes.get_errno()
if e == errno.EAGAIN:
return True
raise OSError(e, errno.errorcode[e])
else:
if pthread.sem_post(self.handle) < 0:
raiseFromErrno()
return False
else:
value = ctypes.pointer(ctypes.c_int(-1))
if pthread.sem_getvalue(self.handle, value) < 0:
raiseFromErrno()
return value.contents.value == 0
def _after_fork(self):
self.count = 0
@staticmethod
def _rebuild(handle, kind, maxvalue, name):
self = SemLock.__new__(SemLock)
self.count = 0
self.ident = 0
self.kind = kind
self.maxvalue = maxvalue
self.name = name
self.handle = _sem_open(name.encode('ascii'))
return self
def raiseFromErrno():
e = ctypes.get_errno()
raise OSError(e, errno.errorcode[e])
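
A hedged usage sketch of the ctypes SemLock above (POSIX only; the semaphore
name is hypothetical and must stay short, since OSX limits name lengths; the
import path is an assumption):

from joblib.externals.loky.backend.semlock import (
    SemLock, SEMAPHORE, sem_unlink)

lock = SemLock(SEMAPHORE, value=1, maxvalue=1, name="/loky-demo")
assert lock.acquire()     # sem_wait on the named semaphore
lock.release()            # sem_post
sem_unlink("/loky-demo")  # remove the name from the system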

View file

@ -0,0 +1,258 @@
###############################################################################
# Prepares and processes the data to setup the new process environment
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/spawn.py (17/02/2017)
# * Improve logging data
#
import os
import sys
import runpy
import types
from multiprocessing import process, util
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
import msvcrt
from .reduction import duplicate
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def get_executable():
return _python_exe
def _check_not_importing_main():
if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.''')
def get_preparation_data(name, init_main_module=True):
'''
Return info about parent needed by child to unpickle process object
'''
_check_not_importing_main()
d = dict(
log_to_stderr=util._log_to_stderr,
authkey=bytes(process.current_process().authkey),
name=name,
sys_argv=sys.argv,
orig_dir=process.ORIGINAL_DIR,
dir=os.getcwd()
)
# Send sys_path and make sure the current directory will not be changed
sys_path = [p for p in sys.path]
try:
i = sys_path.index('')
except ValueError:
pass
else:
sys_path[i] = process.ORIGINAL_DIR
d['sys_path'] = sys_path
# Make sure to pass the information if the multiprocessing logger is active
if util._logger is not None:
d['log_level'] = util._logger.getEffectiveLevel()
if len(util._logger.handlers) > 0:
h = util._logger.handlers[0]
d['log_fmt'] = h.formatter._fmt
# Tell the child how to communicate with the resource_tracker
from .resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
d["tracker_args"] = {"pid": _resource_tracker._pid}
if sys.platform == "win32":
child_w = duplicate(
msvcrt.get_osfhandle(_resource_tracker._fd), inheritable=True)
d["tracker_args"]["fh"] = child_w
else:
d["tracker_args"]["fd"] = _resource_tracker._fd
if sys.version_info >= (3, 8) and os.name == 'posix':
# joblib/loky#242: allow loky processes to retrieve the resource
        # tracker of their parent in case the child process unpickles
# shared_memory objects, that are still tracked by multiprocessing's
# resource_tracker by default.
# XXX: this is a workaround that may be error prone: in the future, it
# would be better to have loky subclass multiprocessing's shared_memory
# to force registration of shared_memory segments via loky's
# resource_tracker.
from multiprocessing.resource_tracker import (
_resource_tracker as mp_resource_tracker
)
# multiprocessing's resource_tracker must be running before loky
        # process is created (otherwise the child won't be able to use it if it
# is created later on)
mp_resource_tracker.ensure_running()
d["mp_tracker_args"] = {
'fd': mp_resource_tracker._fd, 'pid': mp_resource_tracker._pid
}
# Figure out whether to initialise main in the subprocess as a module
# or through direct execution (or to leave it alone entirely)
if init_main_module:
main_module = sys.modules['__main__']
try:
main_mod_name = getattr(main_module.__spec__, "name", None)
except BaseException:
main_mod_name = None
if main_mod_name is not None:
d['init_main_from_name'] = main_mod_name
elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
main_path = getattr(main_module, '__file__', None)
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['init_main_from_path'] = os.path.normpath(main_path)
# Compat for python2.7
d['main_path'] = d['init_main_from_path']
return d
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process().authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'log_fmt' in data:
import logging
util.get_logger().handlers[0].setFormatter(
logging.Formatter(data['log_fmt'])
)
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'mp_tracker_args' in data:
from multiprocessing.resource_tracker import (
_resource_tracker as mp_resource_tracker
)
mp_resource_tracker._fd = data['mp_tracker_args']['fd']
mp_resource_tracker._pid = data['mp_tracker_args']['pid']
if 'tracker_args' in data:
from .resource_tracker import _resource_tracker
_resource_tracker._pid = data["tracker_args"]['pid']
if sys.platform == 'win32':
handle = data["tracker_args"]["fh"]
_resource_tracker._fd = msvcrt.open_osfhandle(handle, 0)
else:
_resource_tracker._fd = data["tracker_args"]["fd"]
if 'init_main_from_name' in data:
_fixup_main_from_name(data['init_main_from_name'])
elif 'init_main_from_path' in data:
_fixup_main_from_path(data['init_main_from_path'])
# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
# __main__.py files for packages, directories, zip archives, etc, run
# their "main only" code unconditionally, so we don't even try to
# populate anything in __main__, nor do we make any changes to
# __main__ attributes
current_main = sys.modules['__main__']
if mod_name == "__main__" or mod_name.endswith(".__main__"):
return
# If this process was forked, __main__ may already be populated
if getattr(current_main.__spec__, "name", None) == mod_name:
return
# Otherwise, __main__ may contain some non-main code where we need to
# support unpickling it properly. We rerun it as __mp_main__ and make
# the normal __main__ an alias to that
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_module(mod_name,
run_name="__mp_main__",
alter_sys=True)
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def _fixup_main_from_path(main_path):
# If this process was forked, __main__ may already be populated
current_main = sys.modules['__main__']
# Unfortunately, the main ipython launch script historically had no
# "if __name__ == '__main__'" guard, so we work around that
# by treating it like a __main__.py file
# See https://github.com/ipython/ipython/issues/4698
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == 'ipython':
return
# Otherwise, if __file__ already has the setting we expect,
# there's nothing more to do
if getattr(current_main, '__file__', None) == main_path:
return
# If the parent process has sent a path through rather than a module
# name we assume it is an executable script that may contain
# non-main code that needs to be executed
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_path(main_path,
run_name="__mp_main__")
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def import_main_path(main_path):
'''
Set sys.modules['__main__'] to module at main_path
'''
_fixup_main_from_path(main_path)
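
A hedged sketch of the intended round trip: the parent builds the
preparation data, ships it to the freshly spawned interpreter, and the child
calls prepare() before unpickling the Process object. The import path is an
assumption, and prepare() is left commented out because it mutates global
state (cwd, sys.path, ...):

from joblib.externals.loky.backend import spawn

data = spawn.get_preparation_data("LokyProcess-1")  # parent side
# ... `data` is pickled and written to the child's pipe ...
# spawn.prepare(data)  # child side, before unpickling the Process object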

View file

@ -0,0 +1,381 @@
###############################################################################
# Synchronization primitives based on our SemLock implementation
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/synchronize.py (17/02/2017)
# * Remove ctx argument for compatibility reason
# * Implementations of Condition/Event are necessary for compatibility
#   with python2.7/3.3; Barrier should be reimplemented for those
#   versions (but it is not used in loky).
#
import os
import sys
import tempfile
import threading
import _multiprocessing
from time import time as _time
from .context import assert_spawning
from . import resource_tracker
from multiprocessing import process
from multiprocessing import util
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
# Try to import the mp.synchronize module cleanly; if it fails,
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
if sys.version_info < (3, 4):
from .semlock import SemLock as _SemLock
from .semlock import sem_unlink
else:
from _multiprocessing import SemLock as _SemLock
from _multiprocessing import sem_unlink
except ImportError:
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
" synchronization primitives needed will not" +
" function, see issue 3770.")
if sys.version_info[:2] < (3, 3):
FileExistsError = OSError
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue):
# unlink_now is only used on win32 or when we are using fork.
unlink_now = False
for i in range(100):
try:
self._semlock = _SemLock(
kind, value, maxvalue, SemLock._make_name(),
unlink_now)
except FileExistsError: # pragma: no cover
pass
else:
break
else: # pragma: no cover
raise FileExistsError('cannot find name for semaphore')
util.debug('created semlock with handle %s and name "%s"'
% (self._semlock.handle, self._semlock.name))
self._make_methods()
def _after_fork(obj):
obj._semlock._after_fork()
util.register_after_fork(self, _after_fork)
# When the object is garbage collected or the
# process shuts down we unlink the semaphore name
resource_tracker.register(self._semlock.name, "semlock")
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
exitpriority=0)
@staticmethod
def _cleanup(name):
sem_unlink(name)
resource_tracker.unregister(name, "semlock")
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
def __enter__(self):
return self._semlock.acquire()
def __exit__(self, *args):
return self._semlock.release()
def __getstate__(self):
assert_spawning(self)
sl = self._semlock
h = sl.handle
return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state):
self._semlock = _SemLock._rebuild(*state)
util.debug('recreated blocker with handle %r and name "%s"'
% (state[0], state[3]))
self._make_methods()
@staticmethod
def _make_name():
# OSX does not support long names for semaphores
return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
#
# Semaphore
#
class Semaphore(SemLock):
def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
def get_value(self):
if sys.platform == 'darwin':
raise NotImplementedError("OSX does not implement sem_getvalue")
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s)>' % (self.__class__.__name__, value)
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
def __init__(self, value=1):
SemLock.__init__(self, SEMAPHORE, value, value)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s, maxvalue=%s)>' % \
(self.__class__.__name__, value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
def __init__(self):
super(Lock, self).__init__(SEMAPHORE, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<%s(owner=%s)>' % (self.__class__.__name__, name)
#
# Recursive lock
#
class RLock(SemLock):
def __init__(self):
super(RLock, self).__init__(RECURSIVE_MUTEX, 1, 1)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
#
# Condition variable
#
class Condition(object):
def __init__(self, lock=None):
self._lock = lock or RLock()
self._sleeping_count = Semaphore(0)
self._woken_count = Semaphore(0)
self._wait_semaphore = Semaphore(0)
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__,
self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in range(count):
self._lock.release()
try:
# wait for notification or timeout
return self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for i in range(count):
self._lock.acquire()
def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
def notify_all(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for i in range(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
#
# Event
#
class Event(object):
def __init__(self):
self._cond = Condition(Lock())
self._flag = Semaphore(0)
def is_set(self):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
return True
return False
def set(self):
with self._cond:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
def clear(self):
with self._cond:
self._flag.acquire(False)
def wait(self, timeout=None):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
if self._flag.acquire(False):
self._flag.release()
return True
return False
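
A hedged usage sketch of the primitives above, which mirror the
multiprocessing.synchronize API (POSIX with a working sem_open is assumed,
as is the import path):

from joblib.externals.loky.backend.synchronize import Lock, Condition

cond = Condition(Lock())
with cond:             # acquires the underlying non-recursive lock
    cond.notify_all()  # wakes any sleeping waiters (none in this toy example)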

View file

@ -0,0 +1,172 @@
import os
import sys
import time
import errno
import signal
import warnings
import threading
import subprocess
try:
import psutil
except ImportError:
psutil = None
WIN32 = sys.platform == "win32"
def _flag_current_thread_clean_exit():
"""Put a ``_clean_exit`` flag on the current thread"""
thread = threading.current_thread()
thread._clean_exit = True
def recursive_terminate(process, use_psutil=True):
if use_psutil and psutil is not None:
_recursive_terminate_with_psutil(process)
else:
_recursive_terminate_without_psutil(process)
def _recursive_terminate_with_psutil(process, retries=5):
try:
children = psutil.Process(process.pid).children(recursive=True)
except psutil.NoSuchProcess:
return
# Kill the children in reverse order to avoid killing the parents before
    # the children in cases where processes are nested more deeply.
for child in children[::-1]:
try:
child.kill()
except psutil.NoSuchProcess:
pass
process.terminate()
process.join()
def _recursive_terminate_without_psutil(process):
"""Terminate a process and its descendants.
"""
try:
_recursive_terminate(process.pid)
except OSError as e:
warnings.warn("Failed to kill subprocesses on this platform. Please"
"install psutil: https://github.com/giampaolo/psutil")
# In case we cannot introspect the children, we fall back to the
# classic Process.terminate.
process.terminate()
process.join()
def _recursive_terminate(pid):
"""Recursively kill the descendants of a process before killing it.
"""
if sys.platform == "win32":
        # On Windows, the taskkill command with option `/T` terminates a given
        # process pid and its children.
try:
subprocess.check_output(
["taskkill", "/F", "/T", "/PID", str(pid)],
stderr=None)
except subprocess.CalledProcessError as e:
            # On Windows, taskkill returns 1 for permission denied and 128 or
            # 255 when no process is found.
if e.returncode not in [1, 128, 255]:
raise
elif e.returncode == 1:
# Try to kill the process without its descendants if taskkill
# was denied permission. If this fails too, with an error
# different from process not found, let the top level function
# raise a warning and retry to kill the process.
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
else:
try:
children_pids = subprocess.check_output(
["pgrep", "-P", str(pid)],
stderr=None
)
except subprocess.CalledProcessError as e:
            # `pgrep` returns 1 when no child process has been found
if e.returncode == 1:
children_pids = b''
else:
raise
        # Decode the result, split the child pids and drop the trailing empty line
children_pids = children_pids.decode().split('\n')[:-1]
for cpid in children_pids:
cpid = int(cpid)
_recursive_terminate(cpid)
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
# if OSError is raised with [Errno 3] no such process, the process
# is already terminated, else, raise the error and let the top
# level function raise a warning and retry to kill the process.
if e.errno != errno.ESRCH:
raise
def get_exitcodes_terminated_worker(processes):
"""Return a formated string with the exitcodes of terminated workers.
If necessary, wait (up to .25s) for the system to correctly set the
exitcode of one terminated worker.
"""
patience = 5
# Catch the exitcode of the terminated workers. There should at least be
# one. If not, wait a bit for the system to correctly set the exitcode of
# the terminated worker.
exitcodes = [p.exitcode for p in list(processes.values())
if p.exitcode is not None]
while len(exitcodes) == 0 and patience > 0:
patience -= 1
exitcodes = [p.exitcode for p in list(processes.values())
if p.exitcode is not None]
time.sleep(.05)
return _format_exitcodes(exitcodes)
def _format_exitcodes(exitcodes):
"""Format a list of exit code with names of the signals if possible"""
str_exitcodes = ["{}({})".format(_get_exitcode_name(e), e)
for e in exitcodes if e is not None]
return "{" + ", ".join(str_exitcodes) + "}"
def _get_exitcode_name(exitcode):
if sys.platform == "win32":
        # Exit codes are unreliable on Windows (see bpo-31863).
        # For this case, return UNKNOWN
return "UNKNOWN"
if exitcode < 0:
try:
import signal
if sys.version_info > (3, 5):
return signal.Signals(-exitcode).name
# construct an inverse lookup table
for v, k in signal.__dict__.items():
if (v.startswith('SIG') and not v.startswith('SIG_') and
k == -exitcode):
return v
except ValueError:
return "UNKNOWN"
elif exitcode != 255:
        # Exit codes are unreliable with forkserver, where 255 is always
        # returned (see bpo-30589); 255 therefore maps to UNKNOWN below
return "EXIT"
return "UNKNOWN"

View file

@ -0,0 +1,113 @@
import inspect
from functools import partial
try:
from joblib.externals.cloudpickle import dumps, loads
cloudpickle = True
except ImportError:
cloudpickle = False
WRAP_CACHE = dict()
class CloudpickledObjectWrapper(object):
def __init__(self, obj, keep_wrapper=False):
self._obj = obj
self._keep_wrapper = keep_wrapper
def __reduce__(self):
_pickled_object = dumps(self._obj)
if not self._keep_wrapper:
return loads, (_pickled_object,)
return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
# previous object.
if attr not in ['_obj', '_keep_wrapper']:
return getattr(self._obj, attr)
return getattr(self, attr)
# Make sure the wrapped object retains the callable property
class CallableObjectWrapper(CloudpickledObjectWrapper):
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def _wrap_non_picklable_objects(obj, keep_wrapper):
if callable(obj):
return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
obj = loads(_pickled_object)
return _wrap_non_picklable_objects(obj, keep_wrapper)
def _wrap_objects_when_needed(obj):
# Function to introspect an object and decide if it should be wrapped or
# not.
if not cloudpickle:
return obj
need_wrap = "__main__" in getattr(obj, "__module__", "")
if isinstance(obj, partial):
return partial(
_wrap_objects_when_needed(obj.func),
*[_wrap_objects_when_needed(a) for a in obj.args],
**{k: _wrap_objects_when_needed(v)
for k, v in obj.keywords.items()}
)
if callable(obj):
# Need wrap if the object is a function defined in a local scope of
# another function.
func_code = getattr(obj, "__code__", "")
need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
# Need wrap if the obj is a lambda expression
func_name = getattr(obj, "__name__", "")
need_wrap |= "<lambda>" in func_name
if not need_wrap:
return obj
wrapped_obj = WRAP_CACHE.get(obj)
if wrapped_obj is None:
wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
WRAP_CACHE[obj] = wrapped_obj
return wrapped_obj
def wrap_non_picklable_objects(obj, keep_wrapper=True):
"""Wrapper for non-picklable object to use cloudpickle to serialize them.
Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle, which is typically slower than pickle. The
proper way to solve serialization issues is to avoid defining functions and
objects in the main scripts and to implement __reduce__ functions for
complex classes.
"""
if not cloudpickle:
raise ImportError("could not from joblib.externals import cloudpickle. Please install "
"cloudpickle to allow extended serialization. "
"(`pip install cloudpickle`).")
# If obj is a class, create a CloudpickledClassWrapper which instantiates
# the object internally and wrap it directly in a CloudpickledObjectWrapper
if inspect.isclass(obj):
class CloudpickledClassWrapper(CloudpickledObjectWrapper):
def __init__(self, *args, **kwargs):
self._obj = obj(*args, **kwargs)
self._keep_wrapper = keep_wrapper
CloudpickledClassWrapper.__name__ = obj.__name__
return CloudpickledClassWrapper
# If obj is an instance of a class, just wrap it in a regular
# CloudpickledObjectWrapper
return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
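
A hedged usage sketch: a nested function is not picklable by plain pickle,
but the wrapper above lets it cross process boundaries via cloudpickle. The
top-level import path is an assumption:

from joblib.externals.loky import wrap_non_picklable_objects

def make_adder(k):
    def add(x):  # nested function: rejected by plain pickle
        return x + k
    return add

add_two = wrap_non_picklable_objects(make_adder(2))
assert add_two(3) == 5  # still callable; serialized via cloudpickle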

File diff suppressed because it is too large

View file

@ -0,0 +1,232 @@
###############################################################################
# Reusable ProcessPoolExecutor
#
# author: Thomas Moreau and Olivier Grisel
#
import time
import warnings
import threading
import multiprocessing as mp
from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
from .backend.context import cpu_count
from .backend import get_context
__all__ = ['get_reusable_executor']
# Python 2 compat helper
STRING_TYPE = type("")
# Singleton executor and id management
_executor_lock = threading.RLock()
_next_executor_id = 0
_executor = None
_executor_kwargs = None
def _get_next_executor_id():
"""Ensure that each successive executor instance has a unique, monotonic id.
The purpose of this monotonic id is to help debug and test automated
instance creation.
"""
global _next_executor_id
with _executor_lock:
executor_id = _next_executor_id
_next_executor_id += 1
return executor_id
def get_reusable_executor(max_workers=None, context=None, timeout=10,
kill_workers=False, reuse="auto",
job_reducers=None, result_reducers=None,
initializer=None, initargs=(), env=None):
"""Return the current ReusableExectutor instance.
Start a new instance if it has not been started already or if the previous
instance was left in a broken state.
If the previous instance does not have the requested number of workers, the
executor is dynamically resized to adjust the number of workers prior to
returning.
Reusing a singleton instance spares the overhead of starting new worker
processes and importing common python packages each time.
``max_workers`` controls the maximum number of tasks that can be running in
parallel in worker processes. By default this is set to the number of
CPUs on the host.
Setting ``timeout`` (in seconds) makes idle workers automatically shutdown
    so as to release system resources. New workers are respawned upon submission
of new tasks so that ``max_workers`` are available to accept the newly
submitted tasks. Setting ``timeout`` to around 100 times the time required
to spawn new processes and import packages in them (on the order of 100ms)
ensures that the overhead of spawning workers is negligible.
Setting ``kill_workers=True`` makes it possible to forcibly interrupt
previously spawned jobs to get a new instance of the reusable executor
with new constructor argument values.
The ``job_reducers`` and ``result_reducers`` are used to customize the
    pickling of tasks and results sent to the executor.
When provided, the ``initializer`` is run first in newly spawned
processes with argument ``initargs``.
    The environment variables in the child processes are a copy of the values
    in the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV``
    and ``VAL`` are string literals to overwrite the environment variable
    ``ENV`` in the child processes with value ``VAL``. The environment
    variables are set in the children before any module is loaded. This only
    works with the ``loky`` context and it is unreliable on Windows with
    Python < 3.6.
"""
_executor, _ = _ReusablePoolExecutor.get_reusable_executor(
max_workers=max_workers, context=context, timeout=timeout,
kill_workers=kill_workers, reuse=reuse, job_reducers=job_reducers,
result_reducers=result_reducers, initializer=initializer,
initargs=initargs, env=env
)
return _executor
class _ReusablePoolExecutor(ProcessPoolExecutor):
def __init__(self, submit_resize_lock, max_workers=None, context=None,
timeout=None, executor_id=0, job_reducers=None,
result_reducers=None, initializer=None, initargs=(),
env=None):
super(_ReusablePoolExecutor, self).__init__(
max_workers=max_workers, context=context, timeout=timeout,
job_reducers=job_reducers, result_reducers=result_reducers,
initializer=initializer, initargs=initargs, env=env)
self.executor_id = executor_id
self._submit_resize_lock = submit_resize_lock
@classmethod
def get_reusable_executor(cls, max_workers=None, context=None, timeout=10,
kill_workers=False, reuse="auto",
job_reducers=None, result_reducers=None,
initializer=None, initargs=(), env=None):
with _executor_lock:
global _executor, _executor_kwargs
executor = _executor
if max_workers is None:
if reuse is True and executor is not None:
max_workers = executor._max_workers
else:
max_workers = cpu_count()
elif max_workers <= 0:
raise ValueError(
"max_workers must be greater than 0, got {}."
.format(max_workers))
if isinstance(context, STRING_TYPE):
context = get_context(context)
if context is not None and context.get_start_method() == "fork":
raise ValueError(
"Cannot use reusable executor with the 'fork' context"
)
kwargs = dict(context=context, timeout=timeout,
job_reducers=job_reducers,
result_reducers=result_reducers,
initializer=initializer, initargs=initargs,
env=env)
if executor is None:
is_reused = False
mp.util.debug("Create a executor with max_workers={}."
.format(max_workers))
executor_id = _get_next_executor_id()
_executor_kwargs = kwargs
_executor = executor = cls(
_executor_lock, max_workers=max_workers,
executor_id=executor_id, **kwargs)
else:
if reuse == 'auto':
reuse = kwargs == _executor_kwargs
if (executor._flags.broken or executor._flags.shutdown
or not reuse):
if executor._flags.broken:
reason = "broken"
elif executor._flags.shutdown:
reason = "shutdown"
else:
reason = "arguments have changed"
mp.util.debug(
"Creating a new executor with max_workers={} as the "
"previous instance cannot be reused ({})."
.format(max_workers, reason))
executor.shutdown(wait=True, kill_workers=kill_workers)
_executor = executor = _executor_kwargs = None
# Recursive call to build a new instance
return cls.get_reusable_executor(max_workers=max_workers,
**kwargs)
else:
mp.util.debug(
"Reusing existing executor with max_workers={}."
.format(executor._max_workers)
)
is_reused = True
executor._resize(max_workers)
return executor, is_reused
def submit(self, fn, *args, **kwargs):
with self._submit_resize_lock:
return super(_ReusablePoolExecutor, self).submit(
fn, *args, **kwargs)
def _resize(self, max_workers):
with self._submit_resize_lock:
if max_workers is None:
raise ValueError("Trying to resize with max_workers=None")
elif max_workers == self._max_workers:
return
if self._executor_manager_thread is None:
# If the executor_manager_thread has not been started
# then no processes have been spawned and we can just
# update _max_workers and return
self._max_workers = max_workers
return
self._wait_job_completion()
            # Some processes might have exited due to the timeout, so check
            # how many children are still alive. Use the
            # _processes_management_lock to ensure that no processes are
            # spawned or time out during the resize.
with self._processes_management_lock:
processes = list(self._processes.values())
nb_children_alive = sum(p.is_alive() for p in processes)
self._max_workers = max_workers
for _ in range(max_workers, nb_children_alive):
self._call_queue.put(None)
while (len(self._processes) > max_workers
and not self._flags.broken):
time.sleep(1e-3)
self._adjust_process_count()
processes = list(self._processes.values())
while not all([p.is_alive() for p in processes]):
time.sleep(1e-3)
def _wait_job_completion(self):
"""Wait for the cache to be empty before resizing the pool."""
# Issue a warning to the user about the bad effect of this usage.
if len(self._pending_work_items) > 0:
warnings.warn("Trying to resize an executor with running jobs: "
"waiting for jobs completion before resizing.",
UserWarning)
mp.util.debug("Executor {} waiting for jobs completion before"
" resizing".format(self.executor_id))
# Wait for the completion of the jobs
while len(self._pending_work_items) > 0:
time.sleep(1e-3)
def _setup_queues(self, job_reducers, result_reducers):
# As this executor can be resized, use a large queue size to avoid
# underestimating capacity and introducing overhead
queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
super(_ReusablePoolExecutor, self)._setup_queues(
job_reducers, result_reducers, queue_size=queue_size)
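
A hedged usage sketch of the singleton behavior described in the docstring:
repeated calls with the same constructor arguments hand back the same
executor instead of spawning new workers. The import path is an assumption:

from joblib.externals.loky import get_reusable_executor

executor = get_reusable_executor(max_workers=2, timeout=2)
print(list(executor.map(abs, [-1, -2, -3])))  # [1, 2, 3]
same = get_reusable_executor(max_workers=2, timeout=2)
assert same is executor  # reused, not restarted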

View file

@ -0,0 +1,36 @@
from sys import version_info
from warnings import warn
"""
Represent an exception with a lot of information.
Provides 2 useful functions:
format_exc: format an exception into a complete traceback, with full
debugging instruction.
format_outer_frames: format the current position in the stack call.
Adapted from IPython's VerboseTB.
This module is deprecated and will be removed in joblib 0.16.
"""
from joblib import _deprecated_format_stack
_deprecated_names = [
name for name in dir(_deprecated_format_stack) if
not name.startswith("__") # special attributes
]
if version_info[:2] >= (3, 7):
def __getattr__(name):
if not name.startswith("__") and name in _deprecated_names:
warn("{} is deprecated and will be removed from joblib "
"in 0.16".format(name), DeprecationWarning)
return getattr(_deprecated_format_stack, name)
raise AttributeError
else:
for name in _deprecated_names:
globals()[name] = getattr(_deprecated_format_stack, name)
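
A hedged sketch of what the PEP 562 shim above does for callers on
Python >= 3.7; it assumes this module is importable as joblib.format_stack
and that format_exc is among the deprecated names:

import warnings
from joblib import format_stack

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    format_stack.format_exc  # attribute access goes through __getattr__
assert any(issubclass(w.category, DeprecationWarning) for w in caught)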

View file

@ -0,0 +1,345 @@
"""
My own variation on function-specific inspect-like features.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import inspect
import warnings
import re
import os
import collections
from itertools import islice
from tokenize import open as open_py_source
from .logger import pformat
full_argspec_fields = ('args varargs varkw defaults kwonlyargs '
'kwonlydefaults annotations')
full_argspec_type = collections.namedtuple('FullArgSpec', full_argspec_fields)
def get_func_code(func):
""" Attempts to retrieve a reliable function code hash.
The reason we don't use inspect.getsource is that it caches the
source, whereas we want this to be modified on the fly when the
function is modified.
Returns
-------
func_code: string
The function code
source_file: string
The path to the file in which the function is defined.
first_line: int
The first line of the code in the source file.
Notes
------
This function does a bit more magic than inspect, and is thus
more robust.
"""
source_file = None
try:
code = func.__code__
source_file = code.co_filename
if not os.path.exists(source_file):
# Use inspect for lambda functions and functions defined in an
# interactive shell, or in doctests
source_code = ''.join(inspect.getsourcelines(func)[0])
line_no = 1
if source_file.startswith('<doctest '):
source_file, line_no = re.match(
r'\<doctest (.*\.rst)\[(.*)\]\>', source_file).groups()
line_no = int(line_no)
source_file = '<doctest %s>' % source_file
return source_code, source_file, line_no
# Try to retrieve the source code.
with open_py_source(source_file) as source_file_obj:
first_line = code.co_firstlineno
# All the lines after the function definition:
source_lines = list(islice(source_file_obj, first_line - 1, None))
return ''.join(inspect.getblock(source_lines)), source_file, first_line
except:
# If retrieving the source code fails, fall back to a hash of the code
# object. This is fragile and might change from one session to another.
if hasattr(func, '__code__'):
# Python 3.X
return str(func.__code__.__hash__()), source_file, -1
else:
# Weird objects like numpy ufunc don't have __code__
# This is fragile, as quite often the id of the object is
# in the repr, so it might not persist across sessions,
# however it will work for ufuncs.
return repr(func), source_file, -1
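# Illustrative sketch (editorial addition, not part of the original module):
# for a function defined in a regular .py file, get_func_code returns the
# source block, the file path and the first line number, e.g.
#
#     >>> def add_one(x):          # assumed to live in an importable file
#     ...     return x + 1
#     >>> code, path, first_line = get_func_code(add_one)
#     >>> code.startswith('def add_one')
#     True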
def _clean_win_chars(string):
"""Windows cannot encode some characters in filename."""
import urllib
if hasattr(urllib, 'quote'):
quote = urllib.quote
else:
# In Python 3, quote is elsewhere
import urllib.parse
quote = urllib.parse.quote
for char in ('<', '>', '!', ':', '\\'):
string = string.replace(char, quote(char))
return string
def get_func_name(func, resolv_alias=True, win_characters=True):
""" Return the function import path (as a list of module names), and
a name for the function.
Parameters
----------
func: callable
The func to inspect
resolv_alias: boolean, optional
If true, possible local aliases are indicated.
win_characters: boolean, optional
If true, substitute special characters using urllib.quote.
This is useful on Windows, which cannot encode some characters in
filenames.
"""
if hasattr(func, '__module__'):
module = func.__module__
else:
try:
module = inspect.getmodule(func)
except TypeError:
if hasattr(func, '__class__'):
module = func.__class__.__module__
else:
module = 'unknown'
if module is None:
# Happens in doctests, e.g.
module = ''
if module == '__main__':
try:
filename = os.path.abspath(inspect.getsourcefile(func))
except:
filename = None
if filename is not None:
# mangling of full path to filename
parts = filename.split(os.sep)
if parts[-1].startswith('<ipython-input'):
# We're in a IPython (or notebook) session. parts[-1] comes
# from func.__code__.co_filename and is of the form
# <ipython-input-N-XYZ>, where:
# - N is the cell number where the function was defined
# - XYZ is a hash representing the function's code (and name).
# It will be consistent across sessions and kernel restarts,
# and will change if the function's code/name changes
# We remove N so that the cache is properly hit if the cell where
# the func is defined is re-executed.
# The XYZ hash should avoid collisions between functions with
# the same name, both within the same notebook but also across
# notebooks
splitted = parts[-1].split('-')
parts[-1] = '-'.join(splitted[:2] + splitted[3:])
filename = '-'.join(parts)
if filename.endswith('.py'):
filename = filename[:-3]
module = module + '-' + filename
module = module.split('.')
if hasattr(func, 'func_name'):
name = func.func_name
elif hasattr(func, '__name__'):
name = func.__name__
else:
name = 'unknown'
# Hack to detect functions not defined at the module-level
if resolv_alias:
# TODO: Maybe add a warning here?
if hasattr(func, 'func_globals') and name in func.func_globals:
if not func.func_globals[name] is func:
name = '%s-alias' % name
if inspect.ismethod(func):
# We need to add the name of the class
if hasattr(func, 'im_class'):
klass = func.im_class
module.append(klass.__name__)
if os.name == 'nt' and win_characters:
# Windows can't encode certain characters in filenames
name = _clean_win_chars(name)
module = [_clean_win_chars(s) for s in module]
return module, name
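# Illustrative sketch (editorial addition, not part of the original module):
# the module path is returned as a list of names and the function name as a
# string, e.g.
#
#     >>> from math import sqrt
#     >>> get_func_name(sqrt)
#     (['math'], 'sqrt')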
def _signature_str(function_name, arg_spec):
"""Helper function to output a function signature"""
arg_spec_str = inspect.formatargspec(*arg_spec)
return '{}{}'.format(function_name, arg_spec_str)
def _function_called_str(function_name, args, kwargs):
"""Helper function to output a function call"""
template_str = '{0}({1}, {2})'
args_str = repr(args)[1:-1]
kwargs_str = ', '.join('%s=%s' % (k, v)
for k, v in kwargs.items())
return template_str.format(function_name, args_str,
kwargs_str)
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
""" Filters the given args and kwargs using a list of arguments to
ignore, and a function specification.
Parameters
----------
func: callable
Function giving the argument specification
ignore_lst: list of strings
List of arguments to ignore (either a name of an argument
in the function spec, or '*', or '**')
*args: list
Positional arguments passed to the function.
**kwargs: dict
Keyword arguments passed to the function
Returns
-------
filtered_args: dict
Dictionary of filtered positional and keyword arguments, keyed by
argument name.
"""
args = list(args)
if isinstance(ignore_lst, str):
# Catch a common mistake
raise ValueError(
'ignore_lst must be a list of parameters to ignore '
'%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
# Special case for functools.partial objects
if (not inspect.ismethod(func) and not inspect.isfunction(func)):
if ignore_lst:
warnings.warn('Cannot inspect object %s, ignore list will '
'not work.' % func, stacklevel=2)
return {'*': args, '**': kwargs}
arg_spec = inspect.getfullargspec(func)
arg_names = arg_spec.args + arg_spec.kwonlyargs
arg_defaults = arg_spec.defaults or ()
if arg_spec.kwonlydefaults:
arg_defaults = arg_defaults + tuple(arg_spec.kwonlydefaults[k]
for k in arg_spec.kwonlyargs
if k in arg_spec.kwonlydefaults)
arg_varargs = arg_spec.varargs
arg_varkw = arg_spec.varkw
if inspect.ismethod(func):
# First argument is 'self', it has been removed by Python
# we need to add it back:
args = [func.__self__, ] + args
# XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
# as on ndarrays.
_, name = get_func_name(func, resolv_alias=False)
arg_dict = dict()
arg_position = -1
for arg_position, arg_name in enumerate(arg_names):
if arg_position < len(args):
# Positional argument or keyword argument given as positional
if arg_name not in arg_spec.kwonlyargs:
arg_dict[arg_name] = args[arg_position]
else:
raise ValueError(
"Keyword-only parameter '%s' was passed as "
'positional parameter for %s:\n'
' %s was called.'
% (arg_name,
_signature_str(name, arg_spec),
_function_called_str(name, args, kwargs))
)
else:
position = arg_position - len(arg_names)
if arg_name in kwargs:
arg_dict[arg_name] = kwargs[arg_name]
else:
try:
arg_dict[arg_name] = arg_defaults[position]
except (IndexError, KeyError) as e:
# Missing argument
raise ValueError(
'Wrong number of arguments for %s:\n'
' %s was called.'
% (_signature_str(name, arg_spec),
_function_called_str(name, args, kwargs))
) from e
varkwargs = dict()
for arg_name, arg_value in sorted(kwargs.items()):
if arg_name in arg_dict:
arg_dict[arg_name] = arg_value
elif arg_varkw is not None:
varkwargs[arg_name] = arg_value
else:
raise TypeError("Ignore list for %s() contains an unexpected "
"keyword argument '%s'" % (name, arg_name))
if arg_varkw is not None:
arg_dict['**'] = varkwargs
if arg_varargs is not None:
varargs = args[arg_position + 1:]
arg_dict['*'] = varargs
# Now remove the arguments to be ignored
for item in ignore_lst:
if item in arg_dict:
arg_dict.pop(item)
else:
raise ValueError("Ignore list: argument '%s' is not defined for "
"function %s"
% (item,
_signature_str(name, arg_spec))
)
# XXX: Return a sorted list of pairs?
return arg_dict
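# Illustrative sketch (editorial addition, not part of the original module):
# ignored arguments are resolved (here to their default value) and then
# dropped from the returned mapping, e.g.
#
#     >>> def distance(a, b, verbose=False):
#     ...     return abs(a - b)
#     >>> filter_args(distance, ['verbose'], args=(3, 5))
#     {'a': 3, 'b': 5}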
def _format_arg(arg):
formatted_arg = pformat(arg, indent=2)
if len(formatted_arg) > 1500:
formatted_arg = '%s...' % formatted_arg[:700]
return formatted_arg
def format_signature(func, *args, **kwargs):
# XXX: Should this use inspect.formatargvalues/formatargspec?
module, name = get_func_name(func)
module = [m for m in module if m]
if module:
module.append(name)
module_path = '.'.join(module)
else:
module_path = name
arg_str = list()
previous_length = 0
for arg in args:
formatted_arg = _format_arg(arg)
if previous_length > 80:
formatted_arg = '\n%s' % formatted_arg
previous_length = len(formatted_arg)
arg_str.append(formatted_arg)
arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()])
arg_str = ', '.join(arg_str)
signature = '%s(%s)' % (name, arg_str)
return module_path, signature
def format_call(func, args, kwargs, object_name="Memory"):
""" Returns a nicely formatted statement displaying the function
call with the given arguments.
"""
path, signature = format_signature(func, *args, **kwargs)
msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name,
path, signature)
return msg
# XXX: Not using logging framework
# self.debug(msg)
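A small sketch of the banner produced by format_call, matching the
"[Memory] Calling ..." header that joblib prints when a cached function is
evaluated (the joblib.func_inspect import path is an assumption about how
this file is packaged):

    # Sketch only: the exact rendering depends on pformat, shown approximately.
    from math import sqrt
    from joblib.func_inspect import format_call

    msg = format_call(sqrt, args=(3,), kwargs={})
    print(msg)
    # ________________________________________________________________________________
    # [Memory] Calling math.sqrt...
    # sqrt(3)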

Some files were not shown because too many files have changed in this diff.