Uploaded Test files
parent f584ad9d97 · commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions
16  venv/Lib/site-packages/joblib/externals/loky/backend/__init__.py  (vendored, new file)
@@ -0,0 +1,16 @@
import os
import sys

from .context import get_context

if sys.version_info > (3, 4):

    def _make_name():
        name = '/loky-%i-%s' % (os.getpid(), next(synchronize.SemLock._rand))
        return name

    # monkey patch the name creation for multiprocessing
    from multiprocessing import synchronize
    synchronize.SemLock._make_name = staticmethod(_make_name)

__all__ = ["get_context"]
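
Note: a minimal usage sketch of the context entry point exported above (illustrative only, not part of the diff; real scripts should run this under an ``if __name__ == "__main__":`` guard):

    from joblib.externals.loky.backend import get_context

    ctx = get_context()  # defaults to the 'loky' start method
    p = ctx.Process(target=print, args=("hello from a loky worker",))
    p.start()
    p.join()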
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/_posix_wait.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/_win_wait.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/compat.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/compat_posix.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/compat_win32.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/managers.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/semlock.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-36.pyc  (vendored, new file; binary not shown)
BIN  venv/Lib/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-36.pyc  (vendored, new file; binary not shown)
76  venv/Lib/site-packages/joblib/externals/loky/backend/_posix_reduction.py  (vendored, new file)
@@ -0,0 +1,76 @@
###############################################################################
# Extra reducers for Unix based system and connections objects
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/reduction.py (17/02/2017)
#  * Add adapted reduction for LokyProcesses and socket/Connection
#
import os
import sys
import socket
import _socket

from .reduction import register
from .context import get_spawning_popen

if sys.version_info >= (3, 3):
    from multiprocessing.connection import Connection
else:
    from _multiprocessing import Connection


HAVE_SEND_HANDLE = (hasattr(socket, 'CMSG_LEN') and
                    hasattr(socket, 'SCM_RIGHTS') and
                    hasattr(socket.socket, 'sendmsg'))


def _mk_inheritable(fd):
    if sys.version_info[:2] > (3, 3):
        os.set_inheritable(fd, True)
    return fd


def DupFd(fd):
    '''Return a wrapper for an fd.'''
    popen_obj = get_spawning_popen()
    if popen_obj is not None:
        return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
    elif HAVE_SEND_HANDLE and sys.version_info[:2] > (3, 3):
        from multiprocessing import resource_sharer
        return resource_sharer.DupFd(fd)
    else:
        raise TypeError(
            'Cannot pickle connection object. This object can only be '
            'passed when spawning a new process'
        )


if sys.version_info[:2] != (3, 3):
    def _reduce_socket(s):
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)

    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.fromfd(fd, family, type, proto)
else:
    from multiprocessing.reduction import reduce_socket as _reduce_socket


register(socket.socket, _reduce_socket)
register(_socket.socket, _reduce_socket)


if sys.version_info[:2] != (3, 3):
    def reduce_connection(conn):
        df = DupFd(conn.fileno())
        return rebuild_connection, (df, conn.readable, conn.writable)

    def rebuild_connection(df, readable, writable):
        fd = df.detach()
        return Connection(fd, readable, writable)
else:
    from multiprocessing.reduction import reduce_connection

register(Connection, reduce_connection)
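
Note: the reducers registered above follow the standard pickle "reduce" contract: a reducer returns a (rebuild_callable, args) pair, and unpickling calls the rebuild function in the receiving process. A self-contained sketch of that contract with a hypothetical type (``MyHandle`` and its helpers are illustrative, not part of loky):

    import copyreg

    class MyHandle:
        def __init__(self, value):
            self.value = value

    def _rebuild_my_handle(value):
        # runs on the unpickling side
        return MyHandle(value)

    def _reduce_my_handle(obj):
        # pickle will later call _rebuild_my_handle(obj.value)
        return _rebuild_my_handle, (obj.value,)

    copyreg.pickle(MyHandle, _reduce_my_handle)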
105  venv/Lib/site-packages/joblib/externals/loky/backend/_posix_wait.py  (vendored, new file)
@@ -0,0 +1,105 @@
###############################################################################
# Compat for wait function on UNIX based system
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/connection.py (17/02/2017)
#  * Backport wait function to python2.7
#

import platform
import select
import socket
import errno

SYSTEM = platform.system()

try:
    import ctypes
except ImportError:  # pragma: no cover
    ctypes = None  # noqa

if SYSTEM == 'Darwin' and ctypes is not None:
    from ctypes.util import find_library
    libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
    CoreServices = ctypes.CDLL(find_library('CoreServices'),
                               use_errno=True)
    mach_absolute_time = libSystem.mach_absolute_time
    mach_absolute_time.restype = ctypes.c_uint64
    absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
    absolute_to_nanoseconds.restype = ctypes.c_uint64
    absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]

    def monotonic():
        return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9

elif SYSTEM == 'Linux' and ctypes is not None:
    # from stackoverflow:
    # questions/1205722/how-do-i-get-monotonic-time-durations-in-python
    import ctypes
    import os

    CLOCK_MONOTONIC = 1  # see <linux/time.h>

    class timespec(ctypes.Structure):
        _fields_ = [
            ('tv_sec', ctypes.c_long),
            ('tv_nsec', ctypes.c_long),
        ]

    librt = ctypes.CDLL('librt.so.1', use_errno=True)
    clock_gettime = librt.clock_gettime
    clock_gettime.argtypes = [
        ctypes.c_int, ctypes.POINTER(timespec),
    ]

    def monotonic():  # noqa
        t = timespec()
        if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
            errno_ = ctypes.get_errno()
            raise OSError(errno_, os.strerror(errno_))
        return t.tv_sec + t.tv_nsec * 1e-9
else:  # pragma: no cover
    from time import time as monotonic


if hasattr(select, 'poll'):
    def _poll(fds, timeout):
        if timeout is not None:
            timeout = int(timeout * 1000)  # timeout is in milliseconds
        fd_map = {}
        pollster = select.poll()
        for fd in fds:
            pollster.register(fd, select.POLLIN)
            if hasattr(fd, 'fileno'):
                fd_map[fd.fileno()] = fd
            else:
                fd_map[fd] = fd
        ls = []
        for fd, event in pollster.poll(timeout):
            if event & select.POLLNVAL:  # pragma: no cover
                raise ValueError('invalid file descriptor %i' % fd)
            ls.append(fd_map[fd])
        return ls
else:
    def _poll(fds, timeout):
        return select.select(fds, [], [], timeout)[0]


def wait(object_list, timeout=None):
    '''
    Wait till an object in object_list is ready/readable.

    Returns list of those objects which are ready/readable.
    '''
    if timeout is not None:
        if timeout <= 0:
            return _poll(object_list, 0)
        else:
            deadline = monotonic() + timeout
    while True:
        try:
            return _poll(object_list, timeout)
        except (OSError, IOError, socket.error) as e:  # pragma: no cover
            if e.errno != errno.EINTR:
                raise
        if timeout is not None:
            timeout = deadline - monotonic()
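
Note: the vendored ``wait`` above is essentially a retry-on-EINTR wrapper around a readiness poll with a deadline. Its observable behavior can be sketched with the stdlib directly (illustrative):

    import os
    import select

    r, w = os.pipe()
    os.write(w, b"x")
    # behaves like wait([r], timeout=1.0): returns the readable subset
    ready, _, _ = select.select([r], [], [], 1.0)
    assert ready == [r]
    os.close(r)
    os.close(w)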
99  venv/Lib/site-packages/joblib/externals/loky/backend/_win_reduction.py  (vendored, new file)
@@ -0,0 +1,99 @@
###############################################################################
# Extra reducers for Windows system and connections objects
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/reduction.py (17/02/2017)
#  * Add adapted reduction for LokyProcesses and socket/PipeConnection
#
import os
import sys
import socket
from .reduction import register


if sys.platform == 'win32':
    if sys.version_info[:2] < (3, 3):
        from _multiprocessing import PipeConnection
    else:
        import _winapi
        from multiprocessing.connection import PipeConnection


if sys.version_info[:2] >= (3, 4) and sys.platform == 'win32':
    class DupHandle(object):
        def __init__(self, handle, access, pid=None):
            # duplicate handle for process with given pid
            if pid is None:
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                return self._handle
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

    def reduce_pipe_connection(conn):
        access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
                  (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
        dh = DupHandle(conn.fileno(), access)
        return rebuild_pipe_connection, (dh, conn.readable, conn.writable)

    def rebuild_pipe_connection(dh, readable, writable):
        from multiprocessing.connection import PipeConnection
        handle = dh.detach()
        return PipeConnection(handle, readable, writable)
    register(PipeConnection, reduce_pipe_connection)

elif sys.platform == 'win32':
    # Older Python versions
    from multiprocessing.reduction import reduce_pipe_connection
    register(PipeConnection, reduce_pipe_connection)


if sys.version_info[:2] < (3, 3) and sys.platform == 'win32':
    from _multiprocessing import win32
    from multiprocessing.reduction import reduce_handle, rebuild_handle
    close = win32.CloseHandle

    def fromfd(handle, family, type_, proto=0):
        s = socket.socket(family, type_, proto, fileno=handle)
        if s.__class__ is not socket.socket:
            s = socket.socket(_sock=s)
        return s

    def reduce_socket(s):
        if not hasattr(socket, "fromfd"):
            raise TypeError("sockets cannot be pickled on this system.")
        reduced_handle = reduce_handle(s.fileno())
        return _rebuild_socket, (reduced_handle, s.family, s.type, s.proto)

    def _rebuild_socket(reduced_handle, family, type_, proto):
        handle = rebuild_handle(reduced_handle)
        s = fromfd(handle, family, type_, proto)
        close(handle)
        return s

    register(socket.socket, reduce_socket)
elif sys.version_info[:2] < (3, 4):
    from multiprocessing.reduction import reduce_socket
    register(socket.socket, reduce_socket)
else:
    from multiprocessing.reduction import _reduce_socket
    register(socket.socket, _reduce_socket)
58  venv/Lib/site-packages/joblib/externals/loky/backend/_win_wait.py  (vendored, new file)
@@ -0,0 +1,58 @@
###############################################################################
# Compat for wait function on Windows system
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/connection.py (17/02/2017)
#  * Backport wait function to python2.7
#

import ctypes
import sys
from time import sleep


if sys.platform == 'win32' and sys.version_info[:2] < (3, 3):
    from _subprocess import WaitForSingleObject, WAIT_OBJECT_0

    try:
        from time import monotonic
    except ImportError:
        # Backport for old Python versions that did not ship a
        # cross-platform monotonic clock by default.

        # TODO: do we want to add support for cygwin at some point? See:
        # https://github.com/atdt/monotonic/blob/master/monotonic.py
        GetTickCount64 = ctypes.windll.kernel32.GetTickCount64
        GetTickCount64.restype = ctypes.c_ulonglong

        def monotonic():
            """Monotonic clock, cannot go backward."""
            return GetTickCount64() / 1000.0

    def wait(handles, timeout=None):
        """Backward compat for python2.7

        This function waits until either:
         * one connection is ready for read,
         * one process handle has exited or got killed,
         * the timeout is reached. Note that this function has a precision
           of 2 msec.
        """
        if timeout is not None:
            deadline = monotonic() + timeout

        while True:
            # We cannot use select, as on Windows it only supports sockets
            ready = []
            for h in handles:
                if type(h) in [int, long]:
                    if WaitForSingleObject(h, 0) == WAIT_OBJECT_0:
                        ready += [h]
                elif h.poll(0):
                    ready.append(h)
            if len(ready) > 0:
                return ready
            sleep(.001)
            if timeout is not None and deadline - monotonic() <= 0:
                return []
41  venv/Lib/site-packages/joblib/externals/loky/backend/compat.py  (vendored, new file)
@@ -0,0 +1,41 @@
###############################################################################
# Compat file to import the correct modules for each platform and python
# version.
#
# author: Thomas Moreau and Olivier grisel
#
import sys

PY3 = sys.version_info[:2] >= (3, 3)

if PY3:
    import queue
else:
    import Queue as queue

if sys.version_info >= (3, 4):
    from multiprocessing.process import BaseProcess
else:
    from multiprocessing.process import Process as BaseProcess

# Platform specific compat
if sys.platform == "win32":
    from .compat_win32 import wait
else:
    from .compat_posix import wait


def set_cause(exc, cause):
    exc.__cause__ = cause

    if not PY3:
        # Preformat message here.
        if exc.__cause__ is not None:
            exc.args = ("{}\n\nThis was caused directly by {}".format(
                exc.args if len(exc.args) != 1 else exc.args[0],
                str(exc.__cause__)),)

    return exc


__all__ = ["queue", "BaseProcess", "set_cause", "wait"]
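
Note: ``set_cause`` emulates Python 3 exception chaining (``raise ... from ...``) in a Python 2/3 compatible way. A hypothetical call site (illustrative):

    from joblib.externals.loky.backend.compat import set_cause

    try:
        raise ValueError("worker crashed")
    except ValueError as original:
        # On Python 3 this is equivalent to: raise RuntimeError(...) from original
        raise set_cause(RuntimeError("task failed"), original)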
13  venv/Lib/site-packages/joblib/externals/loky/backend/compat_posix.py  (vendored, new file)
@@ -0,0 +1,13 @@
# flake8: noqa
###############################################################################
# Compat file to load the correct wait function
#
# author: Thomas Moreau and Olivier grisel
#
import sys

# Compat wait
if sys.version_info < (3, 3):
    from ._posix_wait import wait
else:
    from multiprocessing.connection import wait
46  venv/Lib/site-packages/joblib/externals/loky/backend/compat_win32.py  (vendored, new file)
@@ -0,0 +1,46 @@
# flake8: noqa: F401
import sys
import numbers

if sys.platform == "win32":
    # Avoid import error by code introspection tools such as test runners
    # trying to import this module while running on non-Windows systems.

    # Compat Popen
    if sys.version_info[:2] >= (3, 4):
        from multiprocessing.popen_spawn_win32 import Popen
    else:
        from multiprocessing.forking import Popen

    # wait compat
    if sys.version_info[:2] < (3, 3):
        from ._win_wait import wait
    else:
        from multiprocessing.connection import wait

    # Compat _winapi
    if sys.version_info[:2] >= (3, 4):
        import _winapi
    else:
        import os
        import msvcrt
        if sys.version_info[:2] < (3, 3):
            import _subprocess as win_api
            from _multiprocessing import win32
        else:
            import _winapi as win_api

        class _winapi:
            CreateProcess = win_api.CreateProcess

            @staticmethod
            def CloseHandle(h):
                if isinstance(h, numbers.Integral):
                    # Cast long to int for 64-bit Python 2.7 under Windows
                    h = int(h)
                if sys.version_info[:2] < (3, 3):
                    if not isinstance(h, int):
                        h = h.Detach()
                    win32.CloseHandle(h)
                else:
                    win_api.CloseHandle(h)
367  venv/Lib/site-packages/joblib/externals/loky/backend/context.py  (vendored, new file)
@@ -0,0 +1,367 @@
###############################################################################
# Basic context management with LokyContext and provides
# compat for UNIX 2.7 and 3.3
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/context.py
#  * Create a context ensuring loky uses only objects that are compatible
#  * Add LokyContext to the list of context of multiprocessing so loky can be
#    used with multiprocessing.set_start_method
#  * Add some compat function for python2.7 and 3.3.
#
from __future__ import division

import os
import sys
import subprocess
import traceback
import warnings
import multiprocessing as mp


from .process import LokyProcess, LokyInitMainProcess

START_METHODS = ['loky', 'loky_init_main']
_DEFAULT_START_METHOD = None

# Cache for the number of physical cores to avoid repeating subprocess calls.
# It should not change during the lifetime of the program.
physical_cores_cache = None

if sys.version_info[:2] >= (3, 4):
    from multiprocessing import get_context as mp_get_context
    from multiprocessing.context import assert_spawning, set_spawning_popen
    from multiprocessing.context import get_spawning_popen, BaseContext

    START_METHODS += ['spawn']
    if sys.platform != 'win32':
        START_METHODS += ['fork', 'forkserver']

    def get_context(method=None):
        # Try to overload the default context
        method = method or _DEFAULT_START_METHOD or "loky"
        if method == "fork":
            # If 'fork' is explicitly requested, warn user about potential
            # issues.
            warnings.warn("`fork` start method should not be used with "
                          "`loky` as it does not respect POSIX. Try using "
                          "`spawn` or `loky` instead.", UserWarning)
        try:
            context = mp_get_context(method)
        except ValueError:
            raise ValueError("Unknown context '{}'. Value should be in {}."
                             .format(method, START_METHODS))

        return context

else:
    if sys.platform != 'win32':
        import threading
        # Mechanism to check that the current thread is spawning a process
        _tls = threading.local()
        popen_attr = 'spawning_popen'
    else:
        from multiprocessing.forking import Popen
        _tls = Popen._tls
        popen_attr = 'process_handle'

    BaseContext = object

    def get_spawning_popen():
        return getattr(_tls, popen_attr, None)

    def set_spawning_popen(popen):
        setattr(_tls, popen_attr, popen)

    def assert_spawning(obj):
        if get_spawning_popen() is None:
            raise RuntimeError(
                '%s objects should only be shared between processes'
                ' through inheritance' % type(obj).__name__
            )

    def get_context(method=None):
        method = method or _DEFAULT_START_METHOD or 'loky'
        if method == "loky":
            return LokyContext()
        elif method == "loky_init_main":
            return LokyInitMainContext()
        else:
            raise ValueError("Unknown context '{}'. Value should be in {}."
                             .format(method, START_METHODS))


def set_start_method(method, force=False):
    global _DEFAULT_START_METHOD
    if _DEFAULT_START_METHOD is not None and not force:
        raise RuntimeError('context has already been set')
    assert method is None or method in START_METHODS, (
        "'{}' is not a valid start_method. It should be in {}"
        .format(method, START_METHODS))

    _DEFAULT_START_METHOD = method


def get_start_method():
    return _DEFAULT_START_METHOD


def cpu_count(only_physical_cores=False):
    """Return the number of CPUs the current process can use.

    The returned number of CPUs accounts for:
     * the number of CPUs in the system, as given by
       ``multiprocessing.cpu_count``;
     * the CPU affinity settings of the current process
       (available with Python 3.4+ on some Unix systems);
     * CFS scheduler CPU bandwidth limit (available on Linux only, typically
       set by docker and similar container orchestration systems);
     * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
    and is given as the minimum of these constraints.

    If ``only_physical_cores`` is True, return the number of physical cores
    instead of the number of logical cores (hyperthreading / SMT). Note that
    this option is not enforced if the number of usable cores is controlled in
    any other way such as: process affinity, restricting CFS scheduler policy
    or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
    cores is not found, return the number of logical cores.

    It is also always larger or equal to 1.
    """
    # TODO: use os.cpu_count when dropping python 2 support
    try:
        cpu_count_mp = mp.cpu_count()
    except NotImplementedError:
        cpu_count_mp = 1

    cpu_count_user = _cpu_count_user(cpu_count_mp)
    aggregate_cpu_count = min(cpu_count_mp, cpu_count_user)

    if only_physical_cores:
        cpu_count_physical, exception = _count_physical_cores()
        if cpu_count_user < cpu_count_mp:
            # Respect user setting
            cpu_count = max(cpu_count_user, 1)
        elif cpu_count_physical == "not found":
            # Fallback to default behavior
            if exception is not None:
                # warns only the first time
                warnings.warn(
                    "Could not find the number of physical cores for the "
                    "following reason:\n" + str(exception) + "\n"
                    "Returning the number of logical cores instead. You can "
                    "silence this warning by setting LOKY_MAX_CPU_COUNT to "
                    "the number of cores you want to use.")
                if sys.version_info >= (3, 5):
                    # TODO remove the version check when dropping py2 support
                    traceback.print_tb(exception.__traceback__)

            cpu_count = max(aggregate_cpu_count, 1)
        else:
            return cpu_count_physical
    else:
        cpu_count = max(aggregate_cpu_count, 1)

    return cpu_count


def _cpu_count_user(cpu_count_mp):
    """Number of user defined available CPUs"""
    import math

    # Number of available CPUs given affinity settings
    cpu_count_affinity = cpu_count_mp
    if hasattr(os, 'sched_getaffinity'):
        try:
            cpu_count_affinity = len(os.sched_getaffinity(0))
        except NotImplementedError:
            pass

    # CFS scheduler CPU bandwidth limit
    # available in Linux since 2.6 kernel
    cpu_count_cfs = cpu_count_mp
    cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
    cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
    if os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
        with open(cfs_quota_fname, 'r') as fh:
            cfs_quota_us = int(fh.read())
        with open(cfs_period_fname, 'r') as fh:
            cfs_period_us = int(fh.read())

        if cfs_quota_us > 0 and cfs_period_us > 0:
            # Make sure this quantity is an int as math.ceil returns a
            # float in python2.7. (See issue #165)
            cpu_count_cfs = int(math.ceil(cfs_quota_us / cfs_period_us))

    # User defined soft-limit passed as a loky specific environment variable.
    cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', cpu_count_mp))

    return min(cpu_count_affinity, cpu_count_cfs, cpu_count_loky)


def _count_physical_cores():
    """Return a tuple (number of physical cores, exception)

    If the number of physical cores is found, exception is set to None.
    If it has not been found, return ("not found", exception).

    The number of physical cores is cached to avoid repeating subprocess calls.
    """
    exception = None

    # First check if the value is cached
    global physical_cores_cache
    if physical_cores_cache is not None:
        return physical_cores_cache, exception

    # Not cached yet, find it
    try:
        if sys.platform == "linux":
            cpu_info = subprocess.run(
                "lscpu --parse=core".split(" "), capture_output=True)
            cpu_info = cpu_info.stdout.decode("utf-8").splitlines()
            cpu_info = {line for line in cpu_info if not line.startswith("#")}
            cpu_count_physical = len(cpu_info)
        elif sys.platform == "win32":
            cpu_info = subprocess.run(
                "wmic CPU Get NumberOfCores /Format:csv".split(" "),
                capture_output=True)
            cpu_info = cpu_info.stdout.decode('utf-8').splitlines()
            cpu_info = [l.split(",")[1] for l in cpu_info
                        if (l and l != "Node,NumberOfCores")]
            cpu_count_physical = sum(map(int, cpu_info))
        elif sys.platform == "darwin":
            cpu_info = subprocess.run(
                "sysctl -n hw.physicalcpu".split(" "), capture_output=True)
            cpu_info = cpu_info.stdout.decode('utf-8')
            cpu_count_physical = int(cpu_info)
        else:
            raise NotImplementedError(
                "unsupported platform: {}".format(sys.platform))

        # if cpu_count_physical < 1, we did not find a valid value
        if cpu_count_physical < 1:
            raise ValueError(
                "found {} physical cores < 1".format(cpu_count_physical))

    except Exception as e:
        exception = e
        cpu_count_physical = "not found"

    # Put the result in cache
    physical_cores_cache = cpu_count_physical

    return cpu_count_physical, exception


class LokyContext(BaseContext):
    """Context relying on the LokyProcess."""
    _name = 'loky'
    Process = LokyProcess
    cpu_count = staticmethod(cpu_count)

    def Queue(self, maxsize=0, reducers=None):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, reducers=reducers,
                     ctx=self.get_context())

    def SimpleQueue(self, reducers=None):
        '''Returns a queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(reducers=reducers, ctx=self.get_context())

    if sys.version_info[:2] < (3, 4):
        """Compat for python2.7/3.3 for necessary methods in Context"""
        def get_context(self):
            return self

        def get_start_method(self):
            return self._name

        def Pipe(self, duplex=True):
            '''Returns two connection object connected by a pipe'''
            return mp.Pipe(duplex)

        if sys.platform != "win32":
            """Use the compat Manager for python2.7/3.3 on UNIX to avoid
            relying on fork processes
            """
            def Manager(self):
                """Returns a manager object"""
                from .managers import LokyManager
                m = LokyManager()
                m.start()
                return m
        else:
            """Compat for context on Windows and python2.7/3.3. Using regular
            multiprocessing objects as it does not rely on fork.
            """
            from multiprocessing import synchronize
            Semaphore = staticmethod(synchronize.Semaphore)
            BoundedSemaphore = staticmethod(synchronize.BoundedSemaphore)
            Lock = staticmethod(synchronize.Lock)
            RLock = staticmethod(synchronize.RLock)
            Condition = staticmethod(synchronize.Condition)
            Event = staticmethod(synchronize.Event)
            Manager = staticmethod(mp.Manager)

    if sys.platform != "win32":
        """For Unix platform, use our custom implementation of synchronize
        relying on ctypes to interface with pthread semaphores.
        """
        def Semaphore(self, value=1):
            """Returns a semaphore object"""
            from .synchronize import Semaphore
            return Semaphore(value=value)

        def BoundedSemaphore(self, value):
            """Returns a bounded semaphore object"""
            from .synchronize import BoundedSemaphore
            return BoundedSemaphore(value)

        def Lock(self):
            """Returns a lock object"""
            from .synchronize import Lock
            return Lock()

        def RLock(self):
            """Returns a recurrent lock object"""
            from .synchronize import RLock
            return RLock()

        def Condition(self, lock=None):
            """Returns a condition object"""
            from .synchronize import Condition
            return Condition(lock)

        def Event(self):
            """Returns an event object"""
            from .synchronize import Event
            return Event()


class LokyInitMainContext(LokyContext):
    """Extra context with LokyProcess, which does load the main module

    This context is used for compatibility in the case ``cloudpickle`` is not
    present on the running system. This permits to load functions defined in
    the ``main`` module, using proper safeguards. The declaration of the
    ``executor`` should be protected by ``if __name__ == "__main__":`` and the
    functions and variable used from main should be out of this block.

    This mimics the default behavior of multiprocessing under Windows and the
    behavior of the ``spawn`` start method on a posix system for python3.4+.
    For more details, see the end of the following section of python doc
    https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
    """
    _name = 'loky_init_main'
    Process = LokyInitMainProcess


if sys.version_info > (3, 4):
    """Register loky context so it works with multiprocessing.get_context"""
    ctx_loky = LokyContext()
    mp.context._concrete_contexts['loky'] = ctx_loky
    mp.context._concrete_contexts['loky_init_main'] = LokyInitMainContext()
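
Note: a short sketch of the public surface defined above (import path assumes the vendored layout from this commit; output values depend on the machine):

    from joblib.externals.loky.backend.context import get_context, cpu_count

    ctx = get_context("loky")                    # LokyContext instance
    print(cpu_count())                           # min of system, affinity, CFS and env limits
    print(cpu_count(only_physical_cores=True))   # physical cores when detectable
    q = ctx.SimpleQueue()                        # loky-aware queue bound to this context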
48  venv/Lib/site-packages/joblib/externals/loky/backend/fork_exec.py  (vendored, new file)
@@ -0,0 +1,48 @@
###############################################################################
# Launch a subprocess using fork/exec and make sure only the needed fds are
# shared between the two processes.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys

if sys.platform == "darwin" and sys.version_info < (3, 3):
    FileNotFoundError = OSError


def close_fds(keep_fds):  # pragma: no cover
    """Close all the file descriptors except those in keep_fds."""

    # Make sure to keep stdout and stderr open for logging purpose
    keep_fds = set(keep_fds).union([1, 2])

    # We try to retrieve all the open fds
    try:
        open_fds = set(int(fd) for fd in os.listdir('/proc/self/fd'))
    except FileNotFoundError:
        import resource
        max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        open_fds = set(fd for fd in range(3, max_nfds))
        open_fds.add(0)

    for i in open_fds - keep_fds:
        try:
            os.close(i)
        except OSError:
            pass


def fork_exec(cmd, keep_fds, env=None):

    # copy the environment variables to set in the child process
    env = {} if env is None else env
    child_env = os.environ.copy()
    child_env.update(env)

    pid = os.fork()
    if pid == 0:  # pragma: no cover
        close_fds(keep_fds)
        os.execve(sys.executable, cmd, child_env)
    else:
        return pid
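
Note: ``fork_exec`` always re-executes ``sys.executable``, so ``cmd`` is the argv handed to the fresh interpreter. A hypothetical POSIX-only invocation (illustrative; an empty ``keep_fds`` still leaves stdout/stderr open, per ``close_fds`` above):

    import os
    import sys
    from joblib.externals.loky.backend.fork_exec import fork_exec

    pid = fork_exec([sys.executable, '-c', 'print("child alive")'], keep_fds=[])
    os.waitpid(pid, 0)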
51  venv/Lib/site-packages/joblib/externals/loky/backend/managers.py  (vendored, new file)
@@ -0,0 +1,51 @@
###############################################################################
# compat for UNIX 2.7 and 3.3
# Manager with LokyContext server.
# This avoids having a Manager using fork and breaks the fd.
#
# author: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/managers.py (17/02/2017)
#  * Overload the start method to use LokyContext and launch a loky subprocess
#

import multiprocessing as mp
from multiprocessing.managers import SyncManager, State
from .process import LokyProcess as Process


class LokyManager(SyncManager):
    def start(self, initializer=None, initargs=()):
        '''Spawn a server process for this manager object'''
        assert self._state.value == State.INITIAL

        if (initializer is not None
                and not hasattr(initializer, '__call__')):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = mp.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, bytes(self._authkey),
                  self._serializer, writer, initializer, initargs),
        )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = mp.util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
        )
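
Note: ``LokyManager`` is used like a stdlib ``SyncManager``, except the server runs in a loky-spawned process (the module header targets UNIX on old Pythons). Illustrative only:

    from joblib.externals.loky.backend.managers import LokyManager

    m = LokyManager()
    m.start()
    d = m.dict()          # proxy backed by the manager server process
    d['answer'] = 42
    m.shutdown()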
215  venv/Lib/site-packages/joblib/externals/loky/backend/popen_loky_posix.py  (vendored, new file)
@@ -0,0 +1,215 @@
###############################################################################
# Popen for LokyProcess.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
import signal
import pickle
from io import BytesIO

from . import reduction, spawn
from .context import get_spawning_popen, set_spawning_popen
from multiprocessing import util, process

if sys.version_info[:2] < (3, 3):
    ProcessLookupError = OSError

if sys.platform != "win32":
    from . import resource_tracker


__all__ = []

if sys.platform != "win32":
    #
    # Wrapper for an fd used while launching a process
    #

    class _DupFd(object):
        def __init__(self, fd):
            self.fd = reduction._mk_inheritable(fd)

        def detach(self):
            return self.fd

    #
    # Start child process using subprocess.Popen
    #

    __all__.append('Popen')

    class Popen(object):
        method = 'loky'
        DupFd = _DupFd

        def __init__(self, process_obj):
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None
            self._fds = []
            self._launch(process_obj)

        if sys.version_info < (3, 4):
            @classmethod
            def duplicate_for_child(cls, fd):
                popen = get_spawning_popen()
                popen._fds.append(fd)
                return reduction._mk_inheritable(fd)

        else:
            def duplicate_for_child(self, fd):
                self._fds.append(fd)
                return reduction._mk_inheritable(fd)

        def poll(self, flag=os.WNOHANG):
            if self.returncode is None:
                while True:
                    try:
                        pid, sts = os.waitpid(self.pid, flag)
                    except OSError:
                        # Child process not yet created. See #1731717
                        # e.errno == errno.ECHILD == 10
                        return None
                    else:
                        break
                if pid == self.pid:
                    if os.WIFSIGNALED(sts):
                        self.returncode = -os.WTERMSIG(sts)
                    else:
                        assert os.WIFEXITED(sts)
                        self.returncode = os.WEXITSTATUS(sts)
            return self.returncode

        def wait(self, timeout=None):
            if sys.version_info < (3, 3):
                import time
                if timeout is None:
                    return self.poll(0)
                deadline = time.time() + timeout
                delay = 0.0005
                while 1:
                    res = self.poll()
                    if res is not None:
                        break
                    remaining = deadline - time.time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, 0.05)
                    time.sleep(delay)
                return res

            if self.returncode is None:
                if timeout is not None:
                    from multiprocessing.connection import wait
                    if not wait([self.sentinel], timeout):
                        return None
                # This shouldn't block if wait() returned successfully.
                return self.poll(os.WNOHANG if timeout == 0.0 else 0)
            return self.returncode

        def terminate(self):
            if self.returncode is None:
                try:
                    os.kill(self.pid, signal.SIGTERM)
                except ProcessLookupError:
                    pass
                except OSError:
                    if self.wait(timeout=0.1) is None:
                        raise

        def _launch(self, process_obj):

            tracker_fd = resource_tracker._resource_tracker.getfd()

            fp = BytesIO()
            set_spawning_popen(self)
            try:
                prep_data = spawn.get_preparation_data(
                    process_obj._name,
                    getattr(process_obj, "init_main_module", True))
                reduction.dump(prep_data, fp)
                reduction.dump(process_obj, fp)

            finally:
                set_spawning_popen(None)

            try:
                parent_r, child_w = os.pipe()
                child_r, parent_w = os.pipe()
                # for fd in self._fds:
                #     _mk_inheritable(fd)

                cmd_python = [sys.executable]
                cmd_python += ['-m', self.__module__]
                cmd_python += ['--process-name', str(process_obj.name)]
                cmd_python += ['--pipe',
                               str(reduction._mk_inheritable(child_r))]
                reduction._mk_inheritable(child_w)
                reduction._mk_inheritable(tracker_fd)
                self._fds.extend([child_r, child_w, tracker_fd])
                if sys.version_info >= (3, 8) and os.name == 'posix':
                    mp_tracker_fd = prep_data['mp_tracker_args']['fd']
                    self.duplicate_for_child(mp_tracker_fd)

                from .fork_exec import fork_exec
                pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
                util.debug("launched python with pid {} and cmd:\n{}"
                           .format(pid, cmd_python))
                self.sentinel = parent_r

                method = 'getbuffer'
                if not hasattr(fp, method):
                    method = 'getvalue'
                with os.fdopen(parent_w, 'wb') as f:
                    f.write(getattr(fp, method)())
                self.pid = pid
            finally:
                if parent_r is not None:
                    util.Finalize(self, os.close, (parent_r,))
                for fd in (child_r, child_w):
                    if fd is not None:
                        os.close(fd)

        @staticmethod
        def thread_is_spawning():
            return True


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser('Command line parser')
    parser.add_argument('--pipe', type=int, required=True,
                        help='File handle for the pipe')
    parser.add_argument('--process-name', type=str, default=None,
                        help='Identifier for debugging purpose')

    args = parser.parse_args()

    info = dict()

    exitcode = 1
    try:
        with os.fdopen(args.pipe, 'rb') as from_parent:
            process.current_process()._inheriting = True
            try:
                prep_data = pickle.load(from_parent)
                spawn.prepare(prep_data)
                process_obj = pickle.load(from_parent)
            finally:
                del process.current_process()._inheriting

        exitcode = process_obj._bootstrap()
    except Exception:
        print('\n\n' + '-' * 80)
        print('{} failed with traceback: '.format(args.process_name))
        print('-' * 80)
        import traceback
        print(traceback.format_exc())
        print('\n' + '-' * 80)
    finally:
        if from_parent is not None:
            from_parent.close()

        sys.exit(exitcode)
173  venv/Lib/site-packages/joblib/externals/loky/backend/popen_loky_win32.py  (vendored, new file)
@@ -0,0 +1,173 @@
import os
import sys
from pickle import load
from multiprocessing import process, util

from . import spawn
from . import reduction
from .context import get_spawning_popen, set_spawning_popen

if sys.platform == "win32":
    # Avoid import error by code introspection tools such as test runners
    # trying to import this module while running on non-Windows systems.
    import msvcrt
    from .compat_win32 import _winapi
    from .compat_win32 import Popen as _Popen
    from .reduction import duplicate
else:
    _Popen = object

if sys.version_info[:2] < (3, 3):
    from os import fdopen as open

__all__ = ['Popen']

#
#
#

TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")


def _path_eq(p1, p2):
    return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)


WINENV = (hasattr(sys, "_base_executable")
          and not _path_eq(sys.executable, sys._base_executable))

#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#


class Popen(_Popen):
    '''
    Start a subprocess to run the code of a process object
    '''
    method = 'loky'

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(
            process_obj._name, getattr(process_obj, "init_main_module", True))

        # read end of pipe will be "stolen" by the child process
        # -- see spawn_main() in spawn.py.
        rfd, wfd = os.pipe()
        rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
        os.close(rfd)

        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)

        python_exe = spawn.get_executable()

        # copy the environment variables to set in the child process
        child_env = os.environ.copy()
        child_env.update(process_obj.env)

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            python_exe = sys._base_executable
            child_env["__PYVENV_LAUNCHER__"] = sys.executable

        try:
            with open(wfd, 'wb') as to_child:
                # start process
                try:
                    # This flag allows to pass inheritable handles from the
                    # parent to the child process in a python2-3 compatible way
                    # (see
                    # https://github.com/tomMoral/loky/pull/204#discussion_r290719629
                    # for more detail). When support for Python 2 is dropped,
                    # the cleaner multiprocessing.reduction.steal_handle should
                    # be used instead.
                    inherit = True
                    hp, ht, pid, tid = _winapi.CreateProcess(
                        python_exe, cmd,
                        None, None, inherit, 0,
                        child_env, None, None)
                    _winapi.CloseHandle(ht)
                except BaseException:
                    _winapi.CloseHandle(rhandle)
                    raise

                # set attributes of self
                self.pid = pid
                self.returncode = None
                self._handle = hp
                self.sentinel = int(hp)
                util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))

                # send information to child
                set_spawning_popen(self)
                if sys.version_info[:2] < (3, 4):
                    Popen._tls.process_handle = int(hp)
                try:
                    reduction.dump(prep_data, to_child)
                    reduction.dump(process_obj, to_child)
                finally:
                    set_spawning_popen(None)
                    if sys.version_info[:2] < (3, 4):
                        del Popen._tls.process_handle
        except IOError as exc:
            # IOError 22 happens when the launched subprocess terminated before
            # wfd.close is called. Thus we can safely ignore it.
            if exc.errno != 22:
                raise
            util.debug("While starting {}, ignored a IOError 22"
                       .format(process_obj._name))

    def duplicate_for_child(self, handle):
        assert self is get_spawning_popen()
        return duplicate(handle, self.sentinel)


def get_command_line(pipe_handle, **kwds):
    '''
    Returns prefix of command line used for spawning a child process
    '''
    if getattr(sys, 'frozen', False):
        return ([sys.executable, '--multiprocessing-fork', pipe_handle])
    else:
        prog = 'from joblib.externals.loky.backend.popen_loky_win32 import main; main()'
        opts = util._args_from_interpreter_flags()
        return [spawn.get_executable()] + opts + [
            '-c', prog, '--multiprocessing-fork', pipe_handle]


def is_forking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
        assert len(argv) == 3
        return True
    else:
        return False


def main():
    '''
    Run code specified by data received over pipe
    '''
    assert is_forking(sys.argv)

    handle = int(sys.argv[-1])
    fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    from_parent = os.fdopen(fd, 'rb')

    process.current_process()._inheriting = True
    preparation_data = load(from_parent)
    spawn.prepare(preparation_data)
    self = load(from_parent)
    process.current_process()._inheriting = False

    from_parent.close()

    exitcode = self._bootstrap()
    exit(exitcode)
108  venv/Lib/site-packages/joblib/externals/loky/backend/process.py  (vendored, new file)
@@ -0,0 +1,108 @@
###############################################################################
# LokyProcess implementation
#
# authors: Thomas Moreau and Olivier Grisel
#
# based on multiprocessing/process.py (17/02/2017)
#  * Add some compatibility function for python2.7 and 3.3
#
import os
import sys
from .compat import BaseProcess


class LokyProcess(BaseProcess):
    _start_method = 'loky'

    def __init__(self, group=None, target=None, name=None, args=(),
                 kwargs={}, daemon=None, init_main_module=False,
                 env=None):
        if sys.version_info < (3, 3):
            super(LokyProcess, self).__init__(
                group=group, target=target, name=name, args=args,
                kwargs=kwargs)
            self.daemon = daemon
        else:
            super(LokyProcess, self).__init__(
                group=group, target=target, name=name, args=args,
                kwargs=kwargs, daemon=daemon)
        self.env = {} if env is None else env
        self.authkey = self.authkey
        self.init_main_module = init_main_module

    @staticmethod
    def _Popen(process_obj):
        if sys.platform == "win32":
            from .popen_loky_win32 import Popen
        else:
            from .popen_loky_posix import Popen
        return Popen(process_obj)

    if sys.version_info < (3, 3):
        def start(self):
            '''
            Start child process
            '''
            from multiprocessing.process import _current_process, _cleanup
            assert self._popen is None, 'cannot start a process twice'
            assert self._parent_pid == os.getpid(), \
                'can only start a process object created by current process'
            _cleanup()
            self._popen = self._Popen(self)
            self._sentinel = self._popen.sentinel
            _current_process._children.add(self)

        @property
        def sentinel(self):
            '''
            Return a file descriptor (Unix) or handle (Windows) suitable for
            waiting for process termination.
            '''
            try:
                return self._sentinel
            except AttributeError:
                raise ValueError("process not started")

    if sys.version_info < (3, 4):
        @property
        def authkey(self):
            return self._authkey

        @authkey.setter
        def authkey(self, authkey):
            '''
            Set authorization key of process
            '''
            self._authkey = AuthenticationKey(authkey)

        def _bootstrap(self):
            from .context import set_start_method
            set_start_method(self._start_method)
            super(LokyProcess, self)._bootstrap()


class LokyInitMainProcess(LokyProcess):
    _start_method = 'loky_init_main'

    def __init__(self, group=None, target=None, name=None, args=(),
                 kwargs={}, daemon=None):
        super(LokyInitMainProcess, self).__init__(
            group=group, target=target, name=name, args=args, kwargs=kwargs,
            daemon=daemon, init_main_module=True)


#
# We subclass bytes to avoid accidental transmission of auth keys over network
#

class AuthenticationKey(bytes):
    def __reduce__(self):
        from .context import assert_spawning
        try:
            assert_spawning(self)
        except RuntimeError:
            raise TypeError(
                'Pickling an AuthenticationKey object is '
                'disallowed for security reasons'
            )
        return AuthenticationKey, (bytes(self),)
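
Note: ``LokyProcess`` mirrors ``multiprocessing.Process`` but always starts children via fork+exec (or CreateProcess on Windows) and can inject environment variables. Illustrative; like any spawning start method it needs a main guard:

    from joblib.externals.loky.backend.process import LokyProcess

    if __name__ == "__main__":
        p = LokyProcess(target=print, args=("hello",),
                        env={"OMP_NUM_THREADS": "1"})
        p.start()
        p.join()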
247
venv/Lib/site-packages/joblib/externals/loky/backend/queues.py
vendored
Normal file
247
venv/Lib/site-packages/joblib/externals/loky/backend/queues.py
vendored
Normal file
|
@ -0,0 +1,247 @@
|
|||
###############################################################################
# Queue and SimpleQueue implementation for loky
#
# authors: Thomas Moreau, Olivier Grisel
#
# based on multiprocessing/queues.py (16/02/2017)
#  * Add some compatibility functions for python2.7 and 3.3 and make sure
#    it uses the right synchronization primitives.
#  * Add some custom reducers for the Queues/SimpleQueue to tweak the
#    pickling process. (overload Queue._feed/SimpleQueue.put)
#
import os
import sys
import errno
import weakref
import threading

from multiprocessing import util
from multiprocessing import connection
from multiprocessing.synchronize import SEM_VALUE_MAX
from multiprocessing.queues import Full
from multiprocessing.queues import _sentinel, Queue as mp_Queue
from multiprocessing.queues import SimpleQueue as mp_SimpleQueue

from .reduction import loads, dumps
from .context import assert_spawning, get_context


__all__ = ['Queue', 'SimpleQueue', 'Full']


class Queue(mp_Queue):

    def __init__(self, maxsize=0, reducers=None, ctx=None):

        if sys.version_info[:2] >= (3, 4):
            super().__init__(maxsize=maxsize, ctx=ctx)
        else:
            if maxsize <= 0:
                # Can raise ImportError (see issues #3770 and #23400)
                maxsize = SEM_VALUE_MAX
            if ctx is None:
                ctx = get_context()
            self._maxsize = maxsize
            self._reader, self._writer = connection.Pipe(duplex=False)
            self._rlock = ctx.Lock()
            self._opid = os.getpid()
            if sys.platform == 'win32':
                self._wlock = None
            else:
                self._wlock = ctx.Lock()
            self._sem = ctx.BoundedSemaphore(maxsize)

            # For use by concurrent.futures
            self._ignore_epipe = False

            self._after_fork()

            if sys.platform != 'win32':
                util.register_after_fork(self, Queue._after_fork)

        self._reducers = reducers

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._reducers, self._rlock, self._wlock, self._sem,
                self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._reducers, self._rlock, self._wlock, self._sem,
         self._opid) = state
        if sys.version_info >= (3, 9):
            self._reset()
        else:
            self._after_fork()

    # Overload _start_thread to correctly call our custom _feed
    def _start_thread(self):
        util.debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._writer.close, self._reducers,
                  self._ignore_epipe, self._on_queue_feeder_error, self._sem),
            name='QueueFeederThread'
        )
        self._thread.daemon = True

        util.debug('doing self._thread.start()')
        self._thread.start()
        util.debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = util.Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = util.Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )

    # Overload the _feed methods to use our custom pickling strategy.
    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, reducers,
              ignore_epipe, onerror, queue_sem):
        util.debug('starting thread to feed data to pipe')
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while 1:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            util.debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj_ = dumps(obj, reducers=reducers)
                        if wacquire is None:
                            send_bytes(obj_)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj_)
                            finally:
                                wrelease()
                        # Remove references early to avoid leaking memory
                        del obj, obj_
                except IndexError:
                    pass
            except BaseException as e:
                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to clean up.
                if util.is_exiting():
                    util.info('error in queue thread: %s', e)
                    return
                else:
                    queue_sem.release()
                    onerror(e, obj)

    def _on_queue_feeder_error(self, e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.
        """
        import traceback
        traceback.print_exc()

    if sys.version_info[:2] < (3, 4):
        # Compat for python2.7/3.3 that use _send instead of _send_bytes
        def _after_fork(self):
            super(Queue, self)._after_fork()
            self._send_bytes = self._writer.send_bytes


class SimpleQueue(mp_SimpleQueue):

    def __init__(self, reducers=None, ctx=None):
        if sys.version_info[:2] >= (3, 4):
            super().__init__(ctx=ctx)
        else:
            # Use the context to create the sync objects for python2.7/3.3
            if ctx is None:
                ctx = get_context()
            self._reader, self._writer = connection.Pipe(duplex=False)
            self._rlock = ctx.Lock()
            self._poll = self._reader.poll
            if sys.platform == 'win32':
                self._wlock = None
            else:
                self._wlock = ctx.Lock()

        # Add possibility to use custom reducers
        self._reducers = reducers

    def close(self):
        self._reader.close()
        self._writer.close()

    # Use custom queue set/get state to be able to reduce the custom reducers
    def __getstate__(self):
        assert_spawning(self)
        return (self._reader, self._writer, self._reducers, self._rlock,
                self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._reducers, self._rlock,
         self._wlock) = state

    if sys.version_info[:2] < (3, 4):
        # For python2.7/3.3, overload get to avoid creating deadlocks with
        # unpickling errors.
        def get(self):
            with self._rlock:
                res = self._reader.recv_bytes()
            # deserialize the data after having released the lock
            return loads(res)

    # Overload put to use our customizable reducer
    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = dumps(obj, reducers=self._reducers)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)
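
A sketch of the reducers hook this module adds (illustrative only; Point and _reduce_point are hypothetical, and the queue is used from a single process just to show the round trip):

    import collections

    Point = collections.namedtuple('Point', 'x y')

    def _reduce_point(p):
        # on the consumer side, pickle rebuilds the object as Point(x, y)
        return Point, (p.x, p.y)

    q = SimpleQueue(reducers={Point: _reduce_point}, ctx=get_context())
    q.put(Point(1, 2))   # serialized through the custom reducer
    print(q.get())       # Point(x=1, y=2)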
280
venv/Lib/site-packages/joblib/externals/loky/backend/reduction.py
vendored
Normal file
@ -0,0 +1,280 @@
###############################################################################
# Customizable Pickler with some basic reducers
#
# author: Thomas Moreau
#
# adapted from multiprocessing/reduction.py (17/02/2017)
#  * Replace the ForkingPickler with a similar _LokyPickler,
#  * Add CustomizablePickler to allow customizing the pickling process
#    on the fly.
#
import io
import os
import sys
import functools
from multiprocessing import util
import types
try:
    # Python 2 compat
    from cPickle import loads as pickle_loads
except ImportError:
    from pickle import loads as pickle_loads
    import copyreg

from pickle import HIGHEST_PROTOCOL

if sys.platform == "win32":
    if sys.version_info[:2] > (3, 3):
        from multiprocessing.reduction import duplicate
    else:
        from multiprocessing.forking import duplicate


###############################################################################
# Enable custom pickling in Loky.
# To allow instance customization of the pickling process, we use 2 classes.
# _ReducerRegistry gives module-level customization and CustomizablePickler
# permits instance-based custom reducers. Only CustomizablePickler
# should be used.

class _ReducerRegistry(object):
    """Registry for custom reducers.

    HIGHEST_PROTOCOL is selected by default as this pickler is used
    to pickle ephemeral datastructures for interprocess communication
    hence no backward compatibility is required.

    """

    # We override the pure Python pickler as it's the only way to be able to
    # customize the dispatch table without side effects in Python 2.6
    # to 3.2. For Python 3.3+ leverage the new dispatch_table
    # feature from http://bugs.python.org/issue14166 that makes it possible
    # to use the C implementation of the Pickler which is faster.

    dispatch_table = {}

    @classmethod
    def register(cls, type, reduce_func):
        """Attach a reducer function to a given type in the dispatch table."""
        if sys.version_info < (3,):
            # Python 2 pickler dispatching is not explicitly customizable.
            # Let us use a closure to workaround this limitation.
            def dispatcher(cls, obj):
                reduced = reduce_func(obj)
                cls.save_reduce(obj=obj, *reduced)
            cls.dispatch_table[type] = dispatcher
        else:
            cls.dispatch_table[type] = reduce_func


###############################################################################
# Register extra pickling routines to improve pickling support for loky

register = _ReducerRegistry.register


# make methods picklable
def _reduce_method(m):
    if m.__self__ is None:
        return getattr, (m.__class__, m.__func__.__name__)
    else:
        return getattr, (m.__self__, m.__func__.__name__)


class _C:
    def f(self):
        pass

    @classmethod
    def h(cls):
        pass


register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)


if not hasattr(sys, "pypy_version_info"):
    # PyPy uses functions instead of method_descriptors and wrapper_descriptors
    def _reduce_method_descriptor(m):
        return getattr, (m.__objclass__, m.__name__)

    register(type(list.append), _reduce_method_descriptor)
    register(type(int.__add__), _reduce_method_descriptor)


# Make partial functions picklable
def _reduce_partial(p):
    return _rebuild_partial, (p.func, p.args, p.keywords or {})


def _rebuild_partial(func, args, keywords):
    return functools.partial(func, *args, **keywords)


register(functools.partial, _reduce_partial)

if sys.platform != "win32":
    from ._posix_reduction import _mk_inheritable  # noqa: F401
else:
    from . import _win_reduction  # noqa: F401

# global variable to change the pickler behavior
try:
    from joblib.externals import cloudpickle  # noqa: F401
    DEFAULT_ENV = "cloudpickle"
except ImportError:
    # If cloudpickle is not present, fall back to pickle
    DEFAULT_ENV = "pickle"

ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
_LokyPickler = None
_loky_pickler_name = None


def set_loky_pickler(loky_pickler=None):
    global _LokyPickler, _loky_pickler_name

    if loky_pickler is None:
        loky_pickler = ENV_LOKY_PICKLER

    loky_pickler_cls = None

    # The default loky_pickler is cloudpickle
    if loky_pickler in ["", None]:
        loky_pickler = "cloudpickle"

    if loky_pickler == _loky_pickler_name:
        return

    if loky_pickler == "cloudpickle":
        from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
    else:
        try:
            from importlib import import_module
            module_pickle = import_module(loky_pickler)
            loky_pickler_cls = module_pickle.Pickler
        except (ImportError, AttributeError) as e:
            extra_info = ("\nThis error occurred while setting loky_pickler to"
                          " '{}', as required by the env variable LOKY_PICKLER"
                          " or the function set_loky_pickler."
                          .format(loky_pickler))
            e.args = (e.args[0] + extra_info,) + e.args[1:]
            e.msg = e.args[0]
            raise e

    util.debug("Using '{}' for serialization."
               .format(loky_pickler if loky_pickler else "cloudpickle"))

    class CustomizablePickler(loky_pickler_cls):
        _loky_pickler_cls = loky_pickler_cls

        def _set_dispatch_table(self, dispatch_table):
            for ancestor_class in self._loky_pickler_cls.mro():
                dt_attribute = getattr(ancestor_class, "dispatch_table", None)
                if isinstance(dt_attribute, types.MemberDescriptorType):
                    # Ancestor class (typically _pickle.Pickler) has a
                    # member_descriptor for its "dispatch_table" attribute. Use
                    # it to set the dispatch_table as a member instead of a
                    # dynamic attribute in the __dict__ of the instance,
                    # otherwise it will not be taken into account by the C
                    # implementation of the dump method if a subclass defines a
                    # class-level dispatch_table attribute as was done in
                    # cloudpickle 1.6.0:
                    # https://github.com/joblib/loky/pull/260
                    dt_attribute.__set__(self, dispatch_table)
                    break

            # On top of member descriptor set, also use setattr such that code
            # that directly accesses self.dispatch_table gets a consistent view
            # of the same table.
            self.dispatch_table = dispatch_table

        def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
            loky_pickler_cls.__init__(self, writer, protocol=protocol)
            if reducers is None:
                reducers = {}
            if sys.version_info < (3,):
                self.dispatch = loky_pickler_cls.dispatch.copy()
                self.dispatch.update(_ReducerRegistry.dispatch_table)
            else:
                if hasattr(self, "dispatch_table"):
                    # Force a copy that we will update without mutating any
                    # class-level defined dispatch_table.
                    loky_dt = dict(self.dispatch_table)
                else:
                    # Use standard reducers as bases
                    loky_dt = copyreg.dispatch_table.copy()

                # Register loky specific reducers
                loky_dt.update(_ReducerRegistry.dispatch_table)

                # Set the new dispatch table, taking care of the fact that we
                # need to use the member_descriptor when we inherit from a
                # subclass of the C implementation of the Pickler base class
                # with a class-level dispatch_table attribute.
                self._set_dispatch_table(loky_dt)

            # Register custom reducers
            for type, reduce_func in reducers.items():
                self.register(type, reduce_func)

        def register(self, type, reduce_func):
            """Attach a reducer function to a given type in the dispatch table.
            """
            if sys.version_info < (3,):
                # Python 2 pickler dispatching is not explicitly customizable.
                # Let us use a closure to workaround this limitation.
                def dispatcher(self, obj):
                    reduced = reduce_func(obj)
                    self.save_reduce(obj=obj, *reduced)
                self.dispatch[type] = dispatcher
            else:
                self.dispatch_table[type] = reduce_func

    _LokyPickler = CustomizablePickler
    _loky_pickler_name = loky_pickler


def get_loky_pickler_name():
    global _loky_pickler_name
    return _loky_pickler_name


def get_loky_pickler():
    global _LokyPickler
    return _LokyPickler


# Set it to its default value
set_loky_pickler()


def loads(buf):
    # Compat for python2.7 version
    if sys.version_info < (3, 3) and isinstance(buf, io.BytesIO):
        buf = buf.getvalue()
    return pickle_loads(buf)


def dump(obj, file, reducers=None, protocol=None):
    '''Replacement for pickle.dump() using _LokyPickler.'''
    global _LokyPickler
    _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)


def dumps(obj, reducers=None, protocol=None):
    global _LokyPickler

    buf = io.BytesIO()
    dump(obj, buf, reducers=reducers, protocol=protocol)
    if sys.version_info < (3, 3):
        return buf.getvalue()
    return buf.getbuffer()


__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]

if sys.platform == "win32":
    __all__ += ["duplicate"]
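
The module-level helpers compose as below (illustrative sketch; Unpicklable and its reducer are hypothetical stand-ins for an object that pickle would otherwise reject):

    class Unpicklable(object):
        def __init__(self, fd):
            self.fd = fd  # e.g. a raw file descriptor

    def _reduce_unpicklable(obj):
        # rebuild with a placeholder descriptor on the consumer side
        return Unpicklable, (-1,)

    payload = dumps(Unpicklable(7),
                    reducers={Unpicklable: _reduce_unpicklable})
    obj = loads(payload)
    print(obj.fd)  # -1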
380
venv/Lib/site-packages/joblib/externals/loky/backend/resource_tracker.py
vendored
Normal file
@ -0,0 +1,380 @@
###############################################################################
# Server process to keep track of unlinked resources, like folders and
# semaphores, and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
#  * include custom spawnv_passfds to start the process
#  * use custom unlink from our own SemLock implementation
#  * add some VERBOSE logging
#

#
# On Unix we run a server process which keeps track of unlinked
# resources. The server ignores SIGINT and SIGTERM and reads from a
# pipe. The resource_tracker implements a reference counting scheme: each time
# a Python process anticipates the shared usage of a resource by another
# process, it signals the resource_tracker of this shared usage, and in return,
# the resource_tracker increments the resource's reference count by 1.
# Similarly, when access to a resource is closed by a Python process, the
# process notifies the resource_tracker by asking it to decrement the
# resource's reference count by 1.  When the reference count drops to 0, the
# resource_tracker attempts to clean up the underlying resource.

# Finally, every other process connected to the resource tracker has a copy of
# the writable end of the pipe used to communicate with it, so the resource
# tracker gets EOF when all other processes have exited. Then the
# resource_tracker process unlinks any remaining leaked resources (with
# reference count above 0).

# For semaphores, this is important because the system only supports a limited
# number of named semaphores, and they will not be automatically removed till
# the next reboot.  Without this resource tracker process, "killall python"
# would probably leave unlinked semaphores.

# Note that this behavior differs from CPython's resource_tracker, which only
# implements a list of shared resources, not a proper refcounting scheme.
# Also, CPython's resource tracker will only attempt to clean up those shared
# resources once all processes connected to the resource tracker have exited.


import os
import shutil
import sys
import signal
import warnings
import threading

from . import spawn
from multiprocessing import util

if sys.platform == "win32":
    from .compat_win32 import _winapi
    from .reduction import duplicate
    import msvcrt

try:
    from _multiprocessing import sem_unlink
except ImportError:
    from .semlock import sem_unlink

if sys.version_info < (3,):
    BrokenPipeError = OSError
    from os import fdopen as open

__all__ = ['ensure_running', 'register', 'unregister']

_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)

_CLEANUP_FUNCS = {
    'folder': shutil.rmtree,
    'file': os.unlink
}

if os.name == "posix":
    _CLEANUP_FUNCS['semlock'] = sem_unlink


VERBOSE = False


class ResourceTracker(object):

    def __init__(self):
        self._lock = threading.Lock()
        self._fd = None
        self._pid = None

    def getfd(self):
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        '''Make sure that resource tracker process is running.

        This can be run from any process.  Usually a child process will use
        the resource created by its parent.'''
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)
                if os.name == "posix":
                    try:
                        # At this point, the resource_tracker process has been
                        # killed or crashed. Let's remove the process entry
                        # from the process table to avoid zombie processes.
                        os.waitpid(self._pid, 0)
                    except OSError:
                        # The process was terminated or is a child from an
                        # ancestor of the current process.
                        pass
                self._fd = None
                self._pid = None

                warnings.warn('resource_tracker: process died unexpectedly, '
                              'relaunching.  Some folders/semaphores might '
                              'leak.')

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass

            r, w = os.pipe()
            if sys.platform == "win32":
                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
                os.close(r)
                r = _r

            cmd = 'from {} import main; main({}, {})'.format(
                main.__module__, r, VERBOSE)
            try:
                fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                # In python 3.3, there is a bug which puts `-RRRRR..` instead
                # of `-R` in args. Replace it to get the correct flags.
                # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488
                if sys.version_info[:2] <= (3, 3):
                    import re
                    for i in range(1, len(args)):
                        args[i] = re.sub("-R+", "-R", args[i])
                args += ['-c', cmd]
                util.debug("launching resource tracker: {}".format(args))
                # bpo-33613: Register a signal mask that will block the
                # signals.  This signal mask will be inherited by the child
                # that is going to be spawned and will protect the child from a
                # race condition that can make the child die before it
                # registers signal handlers for SIGINT and SIGTERM. The mask is
                # unregistered after spawning the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_BLOCK,
                                               _IGNORED_SIGNALS)
                    pid = spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_UNBLOCK,
                                               _IGNORED_SIGNALS)
            except BaseException:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                if sys.platform == "win32":
                    _winapi.CloseHandle(r)
                else:
                    os.close(r)

    def _check_alive(self):
        '''Check for the existence of the resource tracker process.'''
        try:
            self._send('PROBE', '', '')
        except BrokenPipeError:
            return False
        else:
            return True

    def register(self, name, rtype):
        '''Register a named resource, and increment its refcount.'''
        self.ensure_running()
        self._send('REGISTER', name, rtype)

    def unregister(self, name, rtype):
        '''Unregister a named resource with resource tracker.'''
        self.ensure_running()
        self._send('UNREGISTER', name, rtype)

    def maybe_unlink(self, name, rtype):
        '''Decrement the refcount of a resource, and delete it if it hits 0'''
        self.ensure_running()
        self._send("MAYBE_UNLINK", name, rtype)

    def _send(self, cmd, name, rtype):
        msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('name too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)


_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd


def main(fd, verbose=0):
    '''Run resource tracker.'''
    # protect the process from ^C and "killall python" etc
    if verbose:
        util.log_to_stderr(level=util.DEBUG)

    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    if _HAVE_SIGMASK:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    if verbose:
        util.debug("Main resource tracker is running")

    registry = {rtype: dict() for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        if sys.platform == "win32":
            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
        with open(fd, 'rb') as f:
            while True:
                line = f.readline()
                if line == b'':  # EOF
                    break
                try:
                    splitted = line.strip().decode('ascii').split(':')
                    # name can potentially contain separator symbols (for
                    # instance folders on Windows)
                    cmd, name, rtype = (
                        splitted[0], ':'.join(splitted[1:-1]), splitted[-1])

                    if cmd == 'PROBE':
                        continue

                    if rtype not in _CLEANUP_FUNCS:
                        raise ValueError(
                            'Cannot register {} for automatic cleanup: '
                            'unknown resource type ({}). Resource type should '
                            'be one of the following: {}'.format(
                                name, rtype, list(_CLEANUP_FUNCS.keys())))

                    if cmd == 'REGISTER':
                        if name not in registry[rtype]:
                            registry[rtype][name] = 1
                        else:
                            registry[rtype][name] += 1

                        if verbose:
                            util.debug(
                                "[ResourceTracker] incremented refcount of {} "
                                "{} (current {})".format(
                                    rtype, name, registry[rtype][name]))
                    elif cmd == 'UNREGISTER':
                        del registry[rtype][name]
                        if verbose:
                            util.debug(
                                "[ResourceTracker] unregister {} {}: "
                                "registry({})".format(name, rtype,
                                                      len(registry)))
                    elif cmd == 'MAYBE_UNLINK':
                        registry[rtype][name] -= 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] decremented refcount of {} "
                                "{} (current {})".format(
                                    rtype, name, registry[rtype][name]))

                        if registry[rtype][name] == 0:
                            del registry[rtype][name]
                            try:
                                if verbose:
                                    util.debug(
                                        "[ResourceTracker] unlink {}"
                                        .format(name))
                                _CLEANUP_FUNCS[rtype](name)
                            except Exception as e:
                                warnings.warn(
                                    'resource_tracker: %s: %r' % (name, e))

                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except BaseException:
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; clean up any remaining resources
        def _unlink_resources(rtype_registry, rtype):
            if rtype_registry:
                try:
                    warnings.warn('resource_tracker: There appear to be %d '
                                  'leaked %s objects to clean up at shutdown' %
                                  (len(rtype_registry), rtype))
                except Exception:
                    pass
            for name in rtype_registry:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died.  We therefore clean it up.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                    if verbose:
                        util.debug("[ResourceTracker] unlink {}"
                                   .format(name))
                except Exception as e:
                    warnings.warn('resource_tracker: %s: %r' % (name, e))

        for rtype, rtype_registry in registry.items():
            if rtype == "folder":
                continue
            else:
                _unlink_resources(rtype_registry, rtype)

        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker. To limit the risk of the resource tracker
        # attempting to delete a resource twice (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
        if "folder" in registry:
            _unlink_resources(registry["folder"], "folder")

    if verbose:
        util.debug("resource tracker shut down")


#
# Start a program with only specified fds kept open
#

def spawnv_passfds(path, args, passfds):
    passfds = sorted(passfds)
    if sys.platform != "win32":
        errpipe_read, errpipe_write = os.pipe()
        try:
            from .reduction import _mk_inheritable
            _pass = []
            for fd in passfds:
                _pass += [_mk_inheritable(fd)]
            from .fork_exec import fork_exec
            return fork_exec(args, _pass)
        finally:
            os.close(errpipe_read)
            os.close(errpipe_write)
    else:
        cmd = ' '.join('"%s"' % x for x in args)
        try:
            hp, ht, pid, tid = _winapi.CreateProcess(
                path, cmd, None, None, True, 0, None, None, None)
            _winapi.CloseHandle(ht)
        except BaseException:
            pass
        return pid
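
The wire protocol used by _send is a single ascii line per command, which makes the refcounting easy to trace (illustrative sketch; the resource name is hypothetical):

    register('/loky-1234-abc', 'semlock')      # b'REGISTER:/loky-1234-abc:semlock\n', refcount -> 1
    register('/loky-1234-abc', 'semlock')      # refcount -> 2
    maybe_unlink('/loky-1234-abc', 'semlock')  # refcount -> 1
    maybe_unlink('/loky-1234-abc', 'semlock')  # refcount -> 0, tracker calls sem_unlink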
274
venv/Lib/site-packages/joblib/externals/loky/backend/semlock.py
vendored
Normal file
@ -0,0 +1,274 @@
###############################################################################
# Ctypes implementation for posix semaphore.
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from cpython/Modules/_multiprocessing/semaphore.c (17/02/2017)
#  * use ctypes to access pthread semaphores and provide a full python
#    semaphore management.
#  * For OSX, as sem_getvalue is not implemented, Semaphores with value > 1
#    are not guaranteed to work.
#  * Only works with LokyProcess on posix
#
import os
import sys
import time
import errno
import ctypes
import tempfile
import threading
from ctypes.util import find_library

# As we need to use ctypes return types for semlock object, failure value
# needs to be cast to proper python value. Unix failure convention is to
# return 0, whereas OSX returns -1
SEM_FAILURE = ctypes.c_void_p(0).value
if sys.platform == 'darwin':
    SEM_FAILURE = ctypes.c_void_p(-1).value

# Semaphore types
RECURSIVE_MUTEX = 0
SEMAPHORE = 1

# Semaphore constants
SEM_OFLAG = ctypes.c_int(os.O_CREAT | os.O_EXCL)
SEM_PERM = ctypes.c_int(384)


class timespec(ctypes.Structure):
    _fields_ = [("tv_sec", ctypes.c_long), ("tv_nsec", ctypes.c_long)]


if sys.platform != 'win32':
    pthread = ctypes.CDLL(find_library('pthread'), use_errno=True)
    pthread.sem_open.restype = ctypes.c_void_p
    pthread.sem_close.argtypes = [ctypes.c_void_p]
    pthread.sem_wait.argtypes = [ctypes.c_void_p]
    pthread.sem_trywait.argtypes = [ctypes.c_void_p]
    pthread.sem_post.argtypes = [ctypes.c_void_p]
    pthread.sem_getvalue.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
    pthread.sem_unlink.argtypes = [ctypes.c_char_p]
    if sys.platform != "darwin":
        pthread.sem_timedwait.argtypes = [ctypes.c_void_p,
                                          ctypes.POINTER(timespec)]

try:
    from threading import get_ident
except ImportError:
    def get_ident():
        return threading.current_thread().ident


if sys.version_info[:2] < (3, 3):
    class FileExistsError(OSError):
        pass

    class FileNotFoundError(OSError):
        pass


def sem_unlink(name):
    if pthread.sem_unlink(name.encode('ascii')) < 0:
        raiseFromErrno()


def _sem_open(name, value=None):
    """ Construct or retrieve a semaphore with the given name

    If value is None, try to retrieve an existing named semaphore.
    Else create a new semaphore with the given value
    """
    if value is None:
        handle = pthread.sem_open(ctypes.c_char_p(name), 0)
    else:
        handle = pthread.sem_open(ctypes.c_char_p(name), SEM_OFLAG, SEM_PERM,
                                  ctypes.c_int(value))

    if handle == SEM_FAILURE:
        e = ctypes.get_errno()
        if e == errno.EEXIST:
            raise FileExistsError("a semaphore named %s already exists" % name)
        elif e == errno.ENOENT:
            raise FileNotFoundError('cannot find semaphore named %s' % name)
        elif e == errno.ENOSYS:
            raise NotImplementedError('No semaphore implementation on this '
                                      'system')
        else:
            raiseFromErrno()

    return handle


def _sem_timedwait(handle, timeout):
    t_start = time.time()
    if sys.platform != "darwin":
        sec = int(timeout)
        tv_sec = int(t_start)
        nsec = int(1e9 * (timeout - sec) + .5)
        tv_nsec = int(1e9 * (t_start - tv_sec) + .5)
        deadline = timespec(sec + tv_sec, nsec + tv_nsec)
        deadline.tv_sec += int(deadline.tv_nsec / 1000000000)
        deadline.tv_nsec %= 1000000000
        return pthread.sem_timedwait(handle, ctypes.pointer(deadline))

    # PERFORMANCE WARNING
    # No sem_timedwait on OSX so we implement our own method. This method can
    # degrade performance as the wait can have a latency of up to 20 msecs
    deadline = t_start + timeout
    delay = 0
    now = time.time()
    while True:
        # Poll the sem file
        res = pthread.sem_trywait(handle)
        if res == 0:
            return 0
        else:
            e = ctypes.get_errno()
            if e != errno.EAGAIN:
                raiseFromErrno()

        # check for timeout
        now = time.time()
        if now > deadline:
            ctypes.set_errno(errno.ETIMEDOUT)
            return -1

        # calculate how much time is left and check the delay is not too long
        # -- maximum is 20 msecs
        difference = (deadline - now)
        delay = min(delay, 20e-3, difference)

        # Sleep and increase delay
        time.sleep(delay)
        delay += 1e-3


class SemLock(object):
    """ctypes wrapper to the unix semaphore"""

    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, name=None, unlink_now=False):
        self.count = 0
        self.ident = 0
        self.kind = kind
        self.maxvalue = maxvalue
        self.name = name
        self.handle = _sem_open(self.name.encode('ascii'), value)

    def __del__(self):
        try:
            res = pthread.sem_close(self.handle)
            assert res == 0, "Issue while closing semaphores"
        except AttributeError:
            pass

    def _is_mine(self):
        return self.count > 0 and get_ident() == self.ident

    def acquire(self, block=True, timeout=None):
        if self.kind == RECURSIVE_MUTEX and self._is_mine():
            self.count += 1
            return True

        if block and timeout is None:
            res = pthread.sem_wait(self.handle)
        elif not block or timeout <= 0:
            res = pthread.sem_trywait(self.handle)
        else:
            res = _sem_timedwait(self.handle, timeout)
        if res < 0:
            e = ctypes.get_errno()
            if e == errno.EINTR:
                return None
            elif e in [errno.EAGAIN, errno.ETIMEDOUT]:
                return False
            raiseFromErrno()
        self.count += 1
        self.ident = get_ident()
        return True

    def release(self):
        if self.kind == RECURSIVE_MUTEX:
            assert self._is_mine(), (
                "attempt to release recursive lock not owned by thread")
            if self.count > 1:
                self.count -= 1
                return
            assert self.count == 1
        else:
            if sys.platform == 'darwin':
                # Handle broken get_value for mac ==> only Lock will work
                # as sem_getvalue does not work properly
                if self.maxvalue == 1:
                    if pthread.sem_trywait(self.handle) < 0:
                        e = ctypes.get_errno()
                        if e != errno.EAGAIN:
                            raise OSError(e, errno.errorcode[e])
                    else:
                        if pthread.sem_post(self.handle) < 0:
                            raiseFromErrno()
                        else:
                            raise ValueError(
                                "semaphore or lock released too many times")
                else:
                    import warnings
                    warnings.warn("semaphores are broken on OSX, release "
                                  "might increase its maximal value",
                                  RuntimeWarning)
            else:
                value = self._get_value()
                if value >= self.maxvalue:
                    raise ValueError(
                        "semaphore or lock released too many times")

        if pthread.sem_post(self.handle) < 0:
            raiseFromErrno()

        self.count -= 1

    def _get_value(self):
        value = ctypes.pointer(ctypes.c_int(-1))
        if pthread.sem_getvalue(self.handle, value) < 0:
            raiseFromErrno()
        return value.contents.value

    def _count(self):
        return self.count

    def _is_zero(self):
        if sys.platform == 'darwin':
            # Handle broken get_value for mac ==> only Lock will work
            # as sem_getvalue does not work properly
            if pthread.sem_trywait(self.handle) < 0:
                e = ctypes.get_errno()
                if e == errno.EAGAIN:
                    return True
                raise OSError(e, errno.errorcode[e])
            else:
                if pthread.sem_post(self.handle) < 0:
                    raiseFromErrno()
                return False
        else:
            value = ctypes.pointer(ctypes.c_int(-1))
            if pthread.sem_getvalue(self.handle, value) < 0:
                raiseFromErrno()
            return value.contents.value == 0

    def _after_fork(self):
        self.count = 0

    @staticmethod
    def _rebuild(handle, kind, maxvalue, name):
        self = SemLock.__new__(SemLock)
        self.count = 0
        self.ident = 0
        self.kind = kind
        self.maxvalue = maxvalue
        self.name = name
        self.handle = _sem_open(name.encode('ascii'))
        return self


def raiseFromErrno():
    e = ctypes.get_errno()
    raise OSError(e, errno.errorcode[e])
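
A minimal sketch of this wrapper used as a mutex (posix only; the semaphore name is hypothetical and must not already exist, since _sem_open passes O_CREAT | O_EXCL):

    lock = SemLock(SEMAPHORE, value=1, maxvalue=1, name='/loky-demo-lock')
    assert lock.acquire()                 # sem_wait succeeds
    assert not lock.acquire(block=False)  # sem_trywait fails with EAGAIN
    lock.release()                        # sem_post
    sem_unlink('/loky-demo-lock')         # remove the named semaphore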
258
venv/Lib/site-packages/joblib/externals/loky/backend/spawn.py
vendored
Normal file
@ -0,0 +1,258 @@
###############################################################################
# Prepares and processes the data to set up the new process environment
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/spawn.py (17/02/2017)
#  * Improve logging data
#
import os
import sys
import runpy
import types
from multiprocessing import process, util


if sys.platform != 'win32':
    WINEXE = False
    WINSERVICE = False
else:
    import msvcrt
    from .reduction import duplicate
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

if WINSERVICE:
    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
    _python_exe = sys.executable


def get_executable():
    return _python_exe


def _check_not_importing_main():
    if getattr(process.current_process(), '_inheriting', False):
        raise RuntimeError('''
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.''')


def get_preparation_data(name, init_main_module=True):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=bytes(process.current_process().authkey),
        name=name,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd()
    )

    # Send sys_path and make sure the current directory will not be changed
    sys_path = [p for p in sys.path]
    try:
        i = sys_path.index('')
    except ValueError:
        pass
    else:
        sys_path[i] = process.ORIGINAL_DIR
    d['sys_path'] = sys_path

    # Make sure to pass the information if the multiprocessing logger is active
    if util._logger is not None:
        d['log_level'] = util._logger.getEffectiveLevel()
        if len(util._logger.handlers) > 0:
            h = util._logger.handlers[0]
            d['log_fmt'] = h.formatter._fmt

    # Tell the child how to communicate with the resource_tracker
    from .resource_tracker import _resource_tracker
    _resource_tracker.ensure_running()
    d["tracker_args"] = {"pid": _resource_tracker._pid}
    if sys.platform == "win32":
        child_w = duplicate(
            msvcrt.get_osfhandle(_resource_tracker._fd), inheritable=True)
        d["tracker_args"]["fh"] = child_w
    else:
        d["tracker_args"]["fd"] = _resource_tracker._fd

    if sys.version_info >= (3, 8) and os.name == 'posix':
        # joblib/loky#242: allow loky processes to retrieve the resource
        # tracker of their parent in case the child process unpickles
        # shared_memory objects that are still tracked by multiprocessing's
        # resource_tracker by default.
        # XXX: this is a workaround that may be error prone: in the future, it
        # would be better to have loky subclass multiprocessing's shared_memory
        # to force registration of shared_memory segments via loky's
        # resource_tracker.
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker
        )
        # multiprocessing's resource_tracker must be running before the loky
        # process is created (otherwise the child won't be able to use it if
        # it is created later on)
        mp_resource_tracker.ensure_running()
        d["mp_tracker_args"] = {
            'fd': mp_resource_tracker._fd, 'pid': mp_resource_tracker._pid
        }

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    if init_main_module:
        main_module = sys.modules['__main__']
        try:
            main_mod_name = getattr(main_module.__spec__, "name", None)
        except BaseException:
            main_mod_name = None
        if main_mod_name is not None:
            d['init_main_from_name'] = main_mod_name
        elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
            main_path = getattr(main_module, '__file__', None)
            if main_path is not None:
                if (not os.path.isabs(main_path) and
                        process.ORIGINAL_DIR is not None):
                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
                d['init_main_from_path'] = os.path.normpath(main_path)
                # Compat for python2.7
                d['main_path'] = d['init_main_from_path']

    return d


#
# Prepare current process
#
old_main_modules = []


def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'mp_tracker_args' in data:
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker
        )
        mp_resource_tracker._fd = data['mp_tracker_args']['fd']
        mp_resource_tracker._pid = data['mp_tracker_args']['pid']
    if 'tracker_args' in data:
        from .resource_tracker import _resource_tracker
        _resource_tracker._pid = data["tracker_args"]['pid']
        if sys.platform == 'win32':
            handle = data["tracker_args"]["fh"]
            _resource_tracker._fd = msvcrt.open_osfhandle(handle, 0)
        else:
            _resource_tracker._fd = data["tracker_args"]["fd"]

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])


# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules['__main__']
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly.  We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(mod_name,
                                    run_name="__mp_main__",
                                    alter_sys=True)
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module


def _fixup_main_from_path(main_path):
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules['__main__']

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == 'ipython':
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, '__file__', None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path,
                                  run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module


def import_main_path(main_path):
    '''
    Set sys.modules['__main__'] to module at main_path
    '''
    _fixup_main_from_path(main_path)
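
The two halves of this module meet as follows (flow sketch; the process name is hypothetical, and the pickling/pipe transport in the middle is handled by the popen_loky_* modules):

    data = get_preparation_data('LokyProcess-1')   # parent side
    # ... data travels to the child over its command pipe ...
    prepare(data)   # child side: restores name, authkey, sys.path, cwd,
                    # logger settings and the resource tracker fds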
381
venv/Lib/site-packages/joblib/externals/loky/backend/synchronize.py
vendored
Normal file
@ -0,0 +1,381 @@
###############################################################################
|
||||
# Synchronization primitives based on our SemLock implementation
|
||||
#
|
||||
# author: Thomas Moreau and Olivier Grisel
|
||||
#
|
||||
# adapted from multiprocessing/synchronize.py (17/02/2017)
|
||||
# * Remove ctx argument for compatibility reason
|
||||
# * Implementation of Condition/Event are necessary for compatibility
|
||||
# with python2.7/3.3, Barrier should be reimplemented to for those
|
||||
# version (but it is not used in loky).
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import _multiprocessing
|
||||
from time import time as _time
|
||||
|
||||
from .context import assert_spawning
|
||||
from . import resource_tracker
|
||||
from multiprocessing import process
|
||||
from multiprocessing import util
|
||||
|
||||
__all__ = [
|
||||
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
|
||||
]
|
||||
# Try to import the mp.synchronize module cleanly, if it fails
|
||||
# raise ImportError for platforms lacking a working sem_open implementation.
|
||||
# See issue 3770
|
||||
try:
|
||||
if sys.version_info < (3, 4):
|
||||
from .semlock import SemLock as _SemLock
|
||||
from .semlock import sem_unlink
|
||||
else:
|
||||
from _multiprocessing import SemLock as _SemLock
|
||||
from _multiprocessing import sem_unlink
|
||||
except (ImportError):
|
||||
raise ImportError("This platform lacks a functioning sem_open" +
|
||||
" implementation, therefore, the required" +
|
||||
" synchronization primitives needed will not" +
|
||||
" function, see issue 3770.")
|
||||
|
||||
if sys.version_info[:2] < (3, 3):
|
||||
FileExistsError = OSError
|
||||
|
||||
#
|
||||
# Constants
|
||||
#
|
||||
|
||||
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
|
||||
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
|
||||
|
||||
|
||||
#
|
||||
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
|
||||
#
|
||||
|
||||
class SemLock(object):
|
||||
|
||||
_rand = tempfile._RandomNameSequence()
|
||||
|
||||
def __init__(self, kind, value, maxvalue):
|
||||
# unlink_now is only used on win32 or when we are using fork.
|
||||
unlink_now = False
|
||||
for i in range(100):
|
||||
try:
|
||||
self._semlock = _SemLock(
|
||||
kind, value, maxvalue, SemLock._make_name(),
|
||||
unlink_now)
|
||||
except FileExistsError: # pragma: no cover
|
||||
pass
|
||||
else:
|
||||
break
|
||||
else: # pragma: no cover
|
||||
raise FileExistsError('cannot find name for semaphore')
|
||||
|
||||
util.debug('created semlock with handle %s and name "%s"'
|
||||
% (self._semlock.handle, self._semlock.name))
|
||||
|
||||
self._make_methods()
|
||||
|
||||
def _after_fork(obj):
|
||||
obj._semlock._after_fork()
|
||||
|
||||
util.register_after_fork(self, _after_fork)
|
||||
|
||||
# When the object is garbage collected or the
|
||||
# process shuts down we unlink the semaphore name
|
||||
resource_tracker.register(self._semlock.name, "semlock")
|
||||
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
|
||||
exitpriority=0)
|
||||
|
||||
@staticmethod
|
||||
def _cleanup(name):
|
||||
sem_unlink(name)
|
||||
resource_tracker.unregister(name, "semlock")
|
||||
|
||||
def _make_methods(self):
|
||||
self.acquire = self._semlock.acquire
|
||||
self.release = self._semlock.release
|
||||
|
||||
def __enter__(self):
|
||||
return self._semlock.acquire()
|
||||
|
||||
def __exit__(self, *args):
|
||||
return self._semlock.release()
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
sl = self._semlock
|
||||
h = sl.handle
|
||||
return (h, sl.kind, sl.maxvalue, sl.name)
|
||||
|
||||
def __setstate__(self, state):
|
||||
self._semlock = _SemLock._rebuild(*state)
|
||||
util.debug('recreated blocker with handle %r and name "%s"'
|
||||
% (state[0], state[3]))
|
||||
self._make_methods()
|
||||
|
||||
@staticmethod
|
||||
def _make_name():
|
||||
# OSX does not support long names for semaphores
|
||||
return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
|
||||
|
||||
|
||||
#
|
||||
# Semaphore
|
||||
#
|
||||
|
||||
class Semaphore(SemLock):
|
||||
|
||||
def __init__(self, value=1):
|
||||
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
|
||||
|
||||
def get_value(self):
|
||||
if sys.platform == 'darwin':
|
||||
raise NotImplementedError("OSX does not implement sem_getvalue")
|
||||
return self._semlock._get_value()
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
value = self._semlock._get_value()
|
||||
except Exception:
|
||||
value = 'unknown'
|
||||
return '<%s(value=%s)>' % (self.__class__.__name__, value)
|
||||
|
||||
|
||||
#
|
||||
# Bounded semaphore
|
||||
#
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
|
||||
def __init__(self, value=1):
|
||||
SemLock.__init__(self, SEMAPHORE, value, value)
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
value = self._semlock._get_value()
|
||||
except Exception:
|
||||
value = 'unknown'
|
||||
return '<%s(value=%s, maxvalue=%s)>' % \
|
||||
(self.__class__.__name__, value, self._semlock.maxvalue)
|
||||
|
||||
|
||||
#
|
||||
# Non-recursive lock
|
||||
#
|
||||
|
||||
class Lock(SemLock):
|
||||
|
||||
def __init__(self):
|
||||
super(Lock, self).__init__(SEMAPHORE, 1, 1)
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
if self._semlock._is_mine():
|
||||
name = process.current_process().name
|
||||
if threading.current_thread().name != 'MainThread':
|
||||
name += '|' + threading.current_thread().name
|
||||
elif self._semlock._get_value() == 1:
|
||||
name = 'None'
|
||||
elif self._semlock._count() > 0:
|
||||
name = 'SomeOtherThread'
|
||||
else:
|
||||
name = 'SomeOtherProcess'
|
||||
except Exception:
|
||||
name = 'unknown'
|
||||
return '<%s(owner=%s)>' % (self.__class__.__name__, name)
#
# Recursive lock
#

class RLock(SemLock):

    def __init__(self):
        super(RLock, self).__init__(RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = 'None', 0
            elif self._semlock._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
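
# A minimal usage sketch: unlike Lock, RLock can be re-acquired by the
# owner that already holds it, as long as every acquire() is matched by
# a release().
#
#     rlock = RLock()
#     with rlock:
#         with rlock:          # re-entrant: no deadlock for the owner
#             ...
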
#
# Condition variable
#

class Condition(object):

    def __init__(self, lock=None):
        self._lock = lock or RLock()
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__,
                                 self._lock, num_waiters)

    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
            'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for i in range(count):
                self._lock.acquire()

    def notify(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()       # wake up one sleeper
            self._woken_count.acquire()          # wait for the sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()  # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()  # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = _time() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - _time()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
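
# A minimal producer/consumer sketch of the wait/notify protocol above,
# with `items` standing in for some process-shared state (hypothetical
# name). wait_for() re-checks the predicate after every wake-up, which
# guards against timeouts racing with notify().
#
#     cond = Condition()
#     # consumer
#     with cond:
#         cond.wait_for(lambda: len(items) > 0)
#         item = items.pop()
#     # producer
#     with cond:
#         items.append(item)
#         cond.notify()
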
#
# Event
#

class Event(object):

    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def is_set(self):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

    def set(self):
        with self._cond:
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
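
# A minimal usage sketch: Event is a one-bit flag built from a Condition
# and a Semaphore(0). set() releases the semaphore once and wakes all
# waiters; clear() drains it again.
#
#     event = Event()
#     # waiter: block until the flag is raised, or 5 seconds elapse
#     if event.wait(timeout=5):
#         ...
#     # controller
#     event.set()
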
172
venv/Lib/site-packages/joblib/externals/loky/backend/utils.py
vendored
Normal file
@ -0,0 +1,172 @@
import os
import sys
import time
import errno
import signal
import warnings
import threading
import subprocess
try:
    import psutil
except ImportError:
    psutil = None


WIN32 = sys.platform == "win32"


def _flag_current_thread_clean_exit():
    """Put a ``_clean_exit`` flag on the current thread"""
    thread = threading.current_thread()
    thread._clean_exit = True


def recursive_terminate(process, use_psutil=True):
    if use_psutil and psutil is not None:
        _recursive_terminate_with_psutil(process)
    else:
        _recursive_terminate_without_psutil(process)
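
# A minimal usage sketch, with `worker_main` as a hypothetical target:
# terminate a process together with any grandchildren it spawned,
# preferring the psutil-based path whenever psutil is importable.
#
#     from multiprocessing import Process
#     p = Process(target=worker_main)
#     p.start()
#     ...
#     recursive_terminate(p)   # kills p and all of its descendants
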
def _recursive_terminate_with_psutil(process, retries=5):
    try:
        children = psutil.Process(process.pid).children(recursive=True)
    except psutil.NoSuchProcess:
        return

    # Kill the children in reverse order to avoid killing the parents before
    # the children in cases where there are more processes nested.
    for child in children[::-1]:
        try:
            child.kill()
        except psutil.NoSuchProcess:
            pass

    process.terminate()
    process.join()


def _recursive_terminate_without_psutil(process):
    """Terminate a process and its descendants."""
    try:
        _recursive_terminate(process.pid)
    except OSError:
        warnings.warn("Failed to kill subprocesses on this platform. Please "
                      "install psutil: https://github.com/giampaolo/psutil")
        # In case we cannot introspect the children, we fall back to the
        # classic Process.terminate.
        process.terminate()
    process.join()


def _recursive_terminate(pid):
    """Recursively kill the descendants of a process before killing it."""

    if sys.platform == "win32":
        # On Windows, taskkill with the `/T` option terminates a given
        # process pid together with its children.
        try:
            subprocess.check_output(
                ["taskkill", "/F", "/T", "/PID", str(pid)],
                stderr=None)
        except subprocess.CalledProcessError as e:
            # On Windows, taskkill returns 1 for permission denied and
            # 128, 255 for no process found.
            if e.returncode not in [1, 128, 255]:
                raise
            elif e.returncode == 1:
                # Try to kill the process without its descendants if taskkill
                # was denied permission. If this fails too, with an error
                # different from process not found, let the top level function
                # raise a warning and retry to kill the process.
                try:
                    os.kill(pid, signal.SIGTERM)
                except OSError as e:
                    if e.errno != errno.ESRCH:
                        raise

    else:
        try:
            children_pids = subprocess.check_output(
                ["pgrep", "-P", str(pid)],
                stderr=None
            )
        except subprocess.CalledProcessError as e:
            # `pgrep` returns 1 when no child process has been found
            if e.returncode == 1:
                children_pids = b''
            else:
                raise

        # Decode the result, split into child pids and drop the trailing
        # empty line
        children_pids = children_pids.decode().split('\n')[:-1]
        for cpid in children_pids:
            cpid = int(cpid)
            _recursive_terminate(cpid)

        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            # if OSError is raised with [Errno 3] no such process, the
            # process is already terminated; else, raise the error and let
            # the top level function raise a warning and retry to kill the
            # process.
            if e.errno != errno.ESRCH:
                raise
def get_exitcodes_terminated_worker(processes):
    """Return a formatted string with the exitcodes of terminated workers.

    If necessary, wait (up to .25s) for the system to correctly set the
    exitcode of one terminated worker.
    """
    patience = 5

    # Catch the exitcode of the terminated workers. There should at least be
    # one. If not, wait a bit for the system to correctly set the exitcode of
    # the terminated worker.
    exitcodes = [p.exitcode for p in list(processes.values())
                 if p.exitcode is not None]
    while len(exitcodes) == 0 and patience > 0:
        patience -= 1
        exitcodes = [p.exitcode for p in list(processes.values())
                     if p.exitcode is not None]
        time.sleep(.05)

    return _format_exitcodes(exitcodes)


def _format_exitcodes(exitcodes):
    """Format a list of exit codes with the names of the signals if possible"""
    str_exitcodes = ["{}({})".format(_get_exitcode_name(e), e)
                     for e in exitcodes if e is not None]
    return "{" + ", ".join(str_exitcodes) + "}"


def _get_exitcode_name(exitcode):
    if sys.platform == "win32":
        # The exitcodes are unreliable on Windows (see bpo-31863).
        # For this case, return UNKNOWN.
        return "UNKNOWN"

    if exitcode < 0:
        try:
            import signal
            if sys.version_info > (3, 5):
                return signal.Signals(-exitcode).name

            # construct an inverse lookup table
            for v, k in signal.__dict__.items():
                if (v.startswith('SIG') and not v.startswith('SIG_') and
                        k == -exitcode):
                    return v
        except ValueError:
            return "UNKNOWN"
    elif exitcode != 255:
        # Exitcode 255 is unreliable with forkserver, where 255 is always
        # returned (see bpo-30589); that case falls through to UNKNOWN.
        return "EXIT"

    return "UNKNOWN"