Uploaded Test files

Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions

View file

@@ -0,0 +1,2 @@
from ._version import version_info, __version__, kernel_protocol_version_info, kernel_protocol_version
from .connect import *

View file

@@ -0,0 +1,3 @@
if __name__ == '__main__':
from ipykernel import kernelapp as app
app.launch_new_instance()
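
This `__main__` stub is what lets the package be launched as `python -m ipykernel`. A short sketch of the programmatic equivalent, assuming ipykernel is installed; the connection-file path is purely illustrative:

# Roughly equivalent to: python -m ipykernel -f /tmp/kernel-demo.json
from ipykernel import kernelapp as app
app.launch_new_instance(argv=['-f', '/tmp/kernel-demo.json'])  # blocks, serving the kernel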

View file

@@ -0,0 +1,153 @@
"""Eventloop hook for OS X
Calls NSApp / CoreFoundation APIs via ctypes.
"""
# cribbed heavily from IPython.terminal.pt_inputhooks.osx
# obj-c boilerplate from appnope, used under BSD 2-clause
import ctypes
import ctypes.util
from threading import Event
objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc'))
void_p = ctypes.c_void_p
objc.objc_getClass.restype = void_p
objc.sel_registerName.restype = void_p
objc.objc_msgSend.restype = void_p
objc.objc_msgSend.argtypes = [void_p, void_p]
msg = objc.objc_msgSend
def _utf8(s):
"""ensure utf8 bytes"""
if not isinstance(s, bytes):
s = s.encode('utf8')
return s
def n(name):
"""create a selector name (for ObjC methods)"""
return objc.sel_registerName(_utf8(name))
def C(classname):
"""get an ObjC Class by name"""
return objc.objc_getClass(_utf8(classname))
# end obj-c boilerplate from appnope
# CoreFoundation C-API calls we will use:
CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))
CFAbsoluteTimeGetCurrent = CoreFoundation.CFAbsoluteTimeGetCurrent
CFAbsoluteTimeGetCurrent.restype = ctypes.c_double
CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
CFRunLoopGetCurrent.restype = void_p
CFRunLoopGetMain = CoreFoundation.CFRunLoopGetMain
CFRunLoopGetMain.restype = void_p
CFRunLoopStop = CoreFoundation.CFRunLoopStop
CFRunLoopStop.restype = None
CFRunLoopStop.argtypes = [void_p]
CFRunLoopTimerCreate = CoreFoundation.CFRunLoopTimerCreate
CFRunLoopTimerCreate.restype = void_p
CFRunLoopTimerCreate.argtypes = [
void_p, # allocator (NULL)
ctypes.c_double, # fireDate
ctypes.c_double, # interval
ctypes.c_int, # flags (0)
ctypes.c_int, # order (0)
void_p, # callout
void_p, # context
]
CFRunLoopAddTimer = CoreFoundation.CFRunLoopAddTimer
CFRunLoopAddTimer.restype = None
CFRunLoopAddTimer.argtypes = [ void_p, void_p, void_p ]
kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')
def _NSApp():
"""Return the global NSApplication instance (NSApp)"""
return msg(C('NSApplication'), n('sharedApplication'))
def _wake(NSApp):
"""Wake the Application"""
event = msg(C('NSEvent'),
n('otherEventWithType:location:modifierFlags:'
'timestamp:windowNumber:context:subtype:data1:data2:'),
15, # Type
0, # location
0, # flags
0, # timestamp
0, # window
None, # context
0, # subtype
0, # data1
0, # data2
)
msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
_triggered = Event()
def stop(timer=None, loop=None):
"""Callback to fire when there's input to be read"""
_triggered.set()
NSApp = _NSApp()
# if NSApp is not running, stop CFRunLoop directly,
# otherwise stop and wake NSApp
if msg(NSApp, n('isRunning')):
msg(NSApp, n('stop:'), NSApp)
_wake(NSApp)
else:
CFRunLoopStop(CFRunLoopGetCurrent())
_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p)
_c_stop_callback = _c_callback_func_type(stop)
def _stop_after(delay):
"""Register callback to stop eventloop after a delay"""
timer = CFRunLoopTimerCreate(
None, # allocator
CFAbsoluteTimeGetCurrent() + delay, # fireDate
0, # interval
0, # flags
0, # order
_c_stop_callback,
None,
)
CFRunLoopAddTimer(
CFRunLoopGetMain(),
timer,
kCFRunLoopCommonModes,
)
def mainloop(duration=1):
"""run the Cocoa eventloop for the specified duration (seconds)"""
_triggered.clear()
NSApp = _NSApp()
_stop_after(duration)
msg(NSApp, n('run'))
if not _triggered.is_set():
# app closed without firing callback,
# probably due to last window being closed.
# Run the loop manually in this case,
# since there may be events still to process (ipython/ipython#9734)
CoreFoundation.CFRunLoopRun()
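
`mainloop` above runs the Cocoa event loop in bounded slices so the kernel can regain control between them. A minimal sketch of how a caller would drive it, assuming macOS (the ctypes library lookups above fail elsewhere):

import sys
if sys.platform == 'darwin':
    from ipykernel._eventloop_macos import mainloop
    # run three ~1s slices; the CFRunLoopTimer registered by _stop_after
    # stops the NSApp run loop at the end of each slice
    for _ in range(3):
        mainloop(duration=1)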

View file

@@ -0,0 +1,16 @@
version_info = (5, 3, 4)
__version__ = '.'.join(map(str, version_info[:3]))
# PEP 440 is annoying: beta/alpha/rc suffixes should _not_ have dots, or
# pip/setuptools gets confused about which of the wheel and sdist is more recent.
if len(version_info) == 4:
extra = version_info[3]
if extra.startswith(('a','b','rc')):
__version__ = __version__+extra
else:
__version__ = __version__+'.'+extra
if len(version_info) > 4:
raise NotImplementedError
kernel_protocol_version_info = (5, 3)
kernel_protocol_version = '%s.%s' % kernel_protocol_version_info
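
A quick worked example of the suffix rule above, using a hypothetical 4-component version_info:

vi = (5, 3, 4, 'rc1')
v = '.'.join(map(str, vi[:3]))
extra = vi[3]
# a/b/rc attach without a dot ('5.3.4rc1'); other extras get one ('5.3.4.dev0')
v = v + extra if extra.startswith(('a', 'b', 'rc')) else v + '.' + extra
assert v == '5.3.4rc1'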

View file

@@ -0,0 +1,40 @@
# encoding: utf-8
"""Utilities to enable code objects to be pickled.
Any process that imports this module will be able to pickle code objects. This
includes the func_code attribute of any function. Once unpickled, new
functions can be built using new.function(code, globals()). Eventually
we need to automate all of this so that functions themselves can be pickled.
Reference: A. Tremols, P Cogolo, "Python Cookbook," p 302-305
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import warnings
warnings.warn("ipykernel.codeutil is deprecated since IPykernel 4.3.1. It has moved to ipyparallel.serialize", DeprecationWarning)
import sys
import types
try:
import copyreg # Py 3
except ImportError:
import copy_reg as copyreg # Py 2
def code_ctor(*args):
return types.CodeType(*args)
def reduce_code(co):
args = [co.co_argcount, co.co_nlocals, co.co_stacksize,
co.co_flags, co.co_code, co.co_consts, co.co_names,
co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab, co.co_freevars, co.co_cellvars]
if sys.version_info[0] >= 3:
args.insert(1, co.co_kwonlyargcount)
if sys.version_info > (3, 8, 0, 'alpha', 3):
args.insert(1, co.co_posonlyargcount)
return code_ctor, tuple(args)
copyreg.pickle(types.CodeType, reduce_code)
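
With the reducer registered, code objects round-trip through pickle. A minimal sketch, assuming the Python versions this module's checks target (the argument list above predates the 3.11 CodeType signature):

import pickle
import types
import ipykernel.codeutil  # noqa: F401  (side effect: registers the copyreg reducer)

def double(x):
    return x * 2

code = pickle.loads(pickle.dumps(double.__code__))
rebuilt = types.FunctionType(code, globals())
assert rebuilt(21) == 42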

View file

@@ -0,0 +1,2 @@
from .manager import *
from .comm import *

View file

@@ -0,0 +1,166 @@
"""Base class for a Comm"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
from traitlets.config import LoggingConfigurable
from ipykernel.kernelbase import Kernel
from ipykernel.jsonutil import json_clean
from traitlets import Instance, Unicode, Bytes, Bool, Dict, Any, default
class Comm(LoggingConfigurable):
"""Class for communicating between a Frontend and a Kernel"""
kernel = Instance('ipykernel.kernelbase.Kernel', allow_none=True)
@default('kernel')
def _default_kernel(self):
if Kernel.initialized():
return Kernel.instance()
comm_id = Unicode()
@default('comm_id')
def _default_comm_id(self):
return uuid.uuid4().hex
primary = Bool(True, help="Am I the primary or secondary Comm?")
target_name = Unicode('comm')
target_module = Unicode(None, allow_none=True, help="""requirejs module from
which to load comm target.""")
topic = Bytes()
@default('topic')
def _default_topic(self):
return ('comm-%s' % self.comm_id).encode('ascii')
_open_data = Dict(help="data dict, if any, to be included in comm_open")
_close_data = Dict(help="data dict, if any, to be included in comm_close")
_msg_callback = Any()
_close_callback = Any()
_closed = Bool(True)
def __init__(self, target_name='', data=None, metadata=None, buffers=None, **kwargs):
if target_name:
kwargs['target_name'] = target_name
super(Comm, self).__init__(**kwargs)
if self.kernel:
if self.primary:
# I am primary, open my peer.
self.open(data=data, metadata=metadata, buffers=buffers)
else:
self._closed = False
def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
"""Helper for sending a comm message on IOPub"""
data = {} if data is None else data
metadata = {} if metadata is None else metadata
content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))
self.kernel.session.send(self.kernel.iopub_socket, msg_type,
content,
metadata=json_clean(metadata),
parent=self.kernel._parent_header,
ident=self.topic,
buffers=buffers,
)
def __del__(self):
"""trigger close on gc"""
self.close(deleting=True)
# publishing messages
def open(self, data=None, metadata=None, buffers=None):
"""Open the frontend-side version of this comm"""
if data is None:
data = self._open_data
comm_manager = getattr(self.kernel, 'comm_manager', None)
if comm_manager is None:
raise RuntimeError("Comms cannot be opened without a kernel "
"and a comm_manager attached to that kernel.")
comm_manager.register_comm(self)
try:
self._publish_msg('comm_open',
data=data, metadata=metadata, buffers=buffers,
target_name=self.target_name,
target_module=self.target_module,
)
self._closed = False
except:
comm_manager.unregister_comm(self)
raise
def close(self, data=None, metadata=None, buffers=None, deleting=False):
"""Close the frontend-side version of this comm"""
if self._closed:
# only close once
return
self._closed = True
# nothing to send if we have no kernel
# can be None during interpreter cleanup
if not self.kernel:
return
if data is None:
data = self._close_data
self._publish_msg('comm_close',
data=data, metadata=metadata, buffers=buffers,
)
if not deleting:
# If deleting, the comm can't be registered
self.kernel.comm_manager.unregister_comm(self)
def send(self, data=None, metadata=None, buffers=None):
"""Send a message to the frontend-side version of this comm"""
self._publish_msg('comm_msg',
data=data, metadata=metadata, buffers=buffers,
)
# registering callbacks
def on_close(self, callback):
"""Register a callback for comm_close
Will be called with the `data` of the close message.
Call `on_close(None)` to disable an existing callback.
"""
self._close_callback = callback
def on_msg(self, callback):
"""Register a callback for comm_msg
Will be called with the `data` of any comm_msg messages.
Call `on_msg(None)` to disable an existing callback.
"""
self._msg_callback = callback
# handling of incoming messages
def handle_close(self, msg):
"""Handle a comm_close message"""
self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
if self._close_callback:
self._close_callback(msg)
def handle_msg(self, msg):
"""Handle a comm_msg message"""
self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
if self._msg_callback:
shell = self.kernel.shell
if shell:
shell.events.trigger('pre_execute')
self._msg_callback(msg)
if shell:
shell.events.trigger('post_execute')
__all__ = ['Comm']
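
A usage sketch from the kernel side, assuming it runs inside a live kernel whose frontend has registered a handler for the illustrative target name 'demo_target':

from ipykernel.comm import Comm

comm = Comm(target_name='demo_target', data={'hello': 'world'})  # sends comm_open
comm.on_msg(lambda msg: print(msg['content']['data']))           # handle frontend replies
comm.send({'value': 42})                                         # sends comm_msg
comm.close()                                                     # sends comm_close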

View file

@@ -0,0 +1,131 @@
"""Base class to manage comms"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
import logging
from traitlets.config import LoggingConfigurable
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types
from traitlets import Instance, Unicode, Dict, Any, default
from .comm import Comm
class CommManager(LoggingConfigurable):
"""Manager for Comms in the Kernel"""
kernel = Instance('ipykernel.kernelbase.Kernel')
comms = Dict()
targets = Dict()
# Public APIs
def register_target(self, target_name, f):
"""Register a callable f for a given target name
f will be called with two arguments when a comm_open message is received for the given `target_name`:
- the Comm instance
- the `comm_open` message itself.
f can be a Python callable or an import string for one.
"""
if isinstance(f, string_types):
f = import_item(f)
self.targets[target_name] = f
def unregister_target(self, target_name, f):
"""Unregister a callable registered with register_target"""
return self.targets.pop(target_name)
def register_comm(self, comm):
"""Register a new comm"""
comm_id = comm.comm_id
comm.kernel = self.kernel
self.comms[comm_id] = comm
return comm_id
def unregister_comm(self, comm):
"""Unregister a comm, and close its counterpart"""
# unlike get_comm, this should raise a KeyError
comm = self.comms.pop(comm.comm_id)
def get_comm(self, comm_id):
"""Get a comm with a particular id
Returns the comm if found, otherwise None.
This will not raise an error,
it will log messages if the comm cannot be found.
"""
try:
return self.comms[comm_id]
except KeyError:
self.log.warning("No such comm: %s", comm_id)
if self.log.isEnabledFor(logging.DEBUG):
# don't create the list of keys if debug messages aren't enabled
self.log.debug("Current comms: %s", list(self.comms.keys()))
# Message handlers
def comm_open(self, stream, ident, msg):
"""Handler for comm_open messages"""
content = msg['content']
comm_id = content['comm_id']
target_name = content['target_name']
f = self.targets.get(target_name, None)
comm = Comm(comm_id=comm_id,
primary=False,
target_name=target_name,
)
self.register_comm(comm)
if f is None:
self.log.error("No such comm target registered: %s", target_name)
else:
try:
f(comm, msg)
return
except Exception:
self.log.error("Exception opening comm with target: %s", target_name, exc_info=True)
# Failure.
try:
comm.close()
except:
self.log.error("""Could not close comm during `comm_open` failure
clean-up. The comm may not have been opened yet.""", exc_info=True)
def comm_msg(self, stream, ident, msg):
"""Handler for comm_msg messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
return
try:
comm.handle_msg(msg)
except Exception:
self.log.error('Exception in comm_msg for %s', comm_id, exc_info=True)
def comm_close(self, stream, ident, msg):
"""Handler for comm_close messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
return
self.comms[comm_id]._closed = True
del self.comms[comm_id]
try:
comm.handle_close(msg)
except Exception:
self.log.error('Exception in comm_close for %s', comm_id, exc_info=True)
__all__ = ['CommManager']
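
The kernel-side counterpart to the Comm sketch above: registering a target so that frontend-initiated comm_open messages are routed to a handler. A sketch assuming it runs inside an IPython kernel; the 'echo' target name is illustrative:

def handle_open(comm, open_msg):
    def recv(msg):
        comm.send(msg['content']['data'])  # echo payloads back to the frontend
    comm.on_msg(recv)

# get_ipython() is a builtin inside IPython sessions
get_ipython().kernel.comm_manager.register_target('echo', handle_open)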

View file

@@ -0,0 +1,190 @@
"""Connection file-related utilities for the kernel
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
import json
import sys
from subprocess import Popen, PIPE
import warnings
from IPython.core.profiledir import ProfileDir
from IPython.paths import get_ipython_dir
from ipython_genutils.path import filefind
from ipython_genutils.py3compat import str_to_bytes, PY3
import jupyter_client
from jupyter_client import write_connection_file
def get_connection_file(app=None):
"""Return the path to the connection file of an app
Parameters
----------
app : IPKernelApp instance [optional]
If unspecified, the currently running app will be used
"""
if app is None:
from ipykernel.kernelapp import IPKernelApp
if not IPKernelApp.initialized():
raise RuntimeError("app not specified, and not in a running Kernel")
app = IPKernelApp.instance()
return filefind(app.connection_file, ['.', app.connection_dir])
def find_connection_file(filename='kernel-*.json', profile=None):
"""DEPRECATED: find a connection file, and return its absolute path.
THIS FUNCTION IS DEPRECATED. Use jupyter_client.find_connection_file instead.
Parameters
----------
filename : str
The connection file or fileglob to search for.
profile : str [optional]
The name of the profile to use when searching for the connection file,
if different from the current IPython session or 'default'.
Returns
-------
str : The absolute path of the connection file.
"""
import warnings
warnings.warn("""ipykernel.find_connection_file is deprecated, use jupyter_client.find_connection_file""",
DeprecationWarning, stacklevel=2)
from IPython.core.application import BaseIPythonApplication as IPApp
try:
# quick check for absolute path, before going through logic
return filefind(filename)
except IOError:
pass
if profile is None:
# profile unspecified, check if running from an IPython app
if IPApp.initialized():
app = IPApp.instance()
profile_dir = app.profile_dir
else:
# not running in IPython, use default profile
profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), 'default')
else:
# find profiledir by profile name:
profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
security_dir = profile_dir.security_dir
return jupyter_client.find_connection_file(filename, path=['.', security_dir])
def _find_connection_file(connection_file, profile=None):
"""Return the absolute path for a connection file
- If nothing specified, return current Kernel's connection file
- If profile specified, show deprecation warning about finding connection files in profiles
- Otherwise, call jupyter_client.find_connection_file
"""
if connection_file is None:
# get connection file from current kernel
return get_connection_file()
else:
# connection file specified, allow shortnames:
if profile is not None:
warnings.warn(
"Finding connection file by profile is deprecated.",
DeprecationWarning, stacklevel=3,
)
return find_connection_file(connection_file, profile=profile)
else:
return jupyter_client.find_connection_file(connection_file)
def get_connection_info(connection_file=None, unpack=False, profile=None):
"""Return the connection information for the current Kernel.
Parameters
----------
connection_file : str [optional]
The connection file to be used. Can be given by absolute path, or
IPython will search in the security directory of a given profile.
If unspecified, the connection file for the currently running
IPython Kernel will be used, which is only allowed from inside a kernel.
unpack : bool [default: False]
if True, return the unpacked dict, otherwise just the string contents
of the file.
profile : DEPRECATED
Returns
-------
The connection dictionary of the current kernel, as string or dict,
depending on `unpack`.
"""
cf = _find_connection_file(connection_file, profile)
with open(cf) as f:
info = f.read()
if unpack:
info = json.loads(info)
# ensure key is bytes:
info['key'] = str_to_bytes(info.get('key', ''))
return info
def connect_qtconsole(connection_file=None, argv=None, profile=None):
"""Connect a qtconsole to the current kernel.
This is useful for connecting a second qtconsole to a kernel, or to a
local notebook.
Parameters
----------
connection_file : str [optional]
The connection file to be used. Can be given by absolute path, or
IPython will search in the security directory of a given profile.
If unspecified, the connection file for the currently running
IPython Kernel will be used, which is only allowed from inside a kernel.
argv : list [optional]
Any extra args to be passed to the console.
profile : DEPRECATED
Returns
-------
:class:`subprocess.Popen` instance running the qtconsole frontend
"""
argv = [] if argv is None else argv
cf = _find_connection_file(connection_file, profile)
cmd = ';'.join([
"from IPython.qt.console import qtconsoleapp",
"qtconsoleapp.main()"
])
kwargs = {}
if PY3:
# Launch the Qt console in a separate session & process group, so
# interrupting the kernel doesn't kill it. This kwarg is not on Py2.
kwargs['start_new_session'] = True
return Popen([sys.executable, '-c', cmd, '--existing', cf] + argv,
stdout=PIPE, stderr=PIPE, close_fds=(sys.platform != 'win32'),
**kwargs
)
__all__ = [
'write_connection_file',
'get_connection_file',
'find_connection_file',
'get_connection_info',
'connect_qtconsole',
]
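
A short sketch of the two most common calls, assuming it runs from inside a kernel (otherwise get_connection_file raises RuntimeError, as above):

from ipykernel.connect import get_connection_file, get_connection_info

print(get_connection_file())             # absolute path to kernel-*.json
info = get_connection_info(unpack=True)  # dict with ip, ports, key, ...
print(info['transport'], info['shell_port'])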

View file

@@ -0,0 +1,62 @@
"""Publishing native (typically pickled) objects.
"""
import warnings
warnings.warn("ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning)
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets.config import Configurable
from traitlets import Instance, Dict, CBytes, Any
from ipykernel.jsonutil import json_clean
from ipykernel.serialize import serialize_object
from jupyter_client.session import Session, extract_header
class ZMQDataPublisher(Configurable):
topic = CBytes(b'datapub')
session = Instance(Session, allow_none=True)
pub_socket = Any(allow_none=True)
parent_header = Dict({})
def set_parent(self, parent):
"""Set the parent for outbound messages."""
self.parent_header = extract_header(parent)
def publish_data(self, data):
"""publish a data_message on the IOPub channel
Parameters
----------
data : dict
The data to be published. Think of it as a namespace.
"""
session = self.session
buffers = serialize_object(data,
buffer_threshold=session.buffer_threshold,
item_threshold=session.item_threshold,
)
content = json_clean(dict(keys=list(data.keys())))
session.send(self.pub_socket, 'data_message', content=content,
parent=self.parent_header,
buffers=buffers,
ident=self.topic,
)
def publish_data(data):
"""publish a data_message on the IOPub channel
Parameters
----------
data : dict
The data to be published. Think of it as a namespace.
"""
warnings.warn("ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub", DeprecationWarning)
from ipykernel.zmqshell import ZMQInteractiveShell
ZMQInteractiveShell.instance().data_pub.publish_data(data)

View file

@@ -0,0 +1,80 @@
"""Replacements for sys.displayhook that publish over ZMQ."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from IPython.core.displayhook import DisplayHook
from ipykernel.jsonutil import encode_images, json_clean
from ipython_genutils.py3compat import builtin_mod
from traitlets import Instance, Dict, Any
from jupyter_client.session import extract_header, Session
class ZMQDisplayHook(object):
"""A simple displayhook that publishes the object's repr over a ZeroMQ
socket."""
topic = b'execute_result'
def __init__(self, session, pub_socket):
self.session = session
self.pub_socket = pub_socket
self.parent_header = {}
def get_execution_count(self):
"""This method is replaced in kernelapp"""
return 0
def __call__(self, obj):
if obj is None:
return
builtin_mod._ = obj
sys.stdout.flush()
sys.stderr.flush()
contents = {u'execution_count': self.get_execution_count(),
u'data': {'text/plain': repr(obj)},
u'metadata': {}}
self.session.send(self.pub_socket, u'execute_result', contents,
parent=self.parent_header, ident=self.topic)
def set_parent(self, parent):
self.parent_header = extract_header(parent)
class ZMQShellDisplayHook(DisplayHook):
"""A displayhook subclass that publishes data using ZeroMQ. This is intended
to work with an InteractiveShell instance. It sends a dict of different
representations of the object."""
topic=None
session = Instance(Session, allow_none=True)
pub_socket = Any(allow_none=True)
parent_header = Dict({})
def set_parent(self, parent):
"""Set the parent for outbound messages."""
self.parent_header = extract_header(parent)
def start_displayhook(self):
self.msg = self.session.msg(u'execute_result', {
'data': {},
'metadata': {},
}, parent=self.parent_header)
def write_output_prompt(self):
"""Write the output prompt."""
self.msg['content']['execution_count'] = self.prompt_count
def write_format_data(self, format_dict, md_dict=None):
self.msg['content']['data'] = json_clean(encode_images(format_dict))
self.msg['content']['metadata'] = md_dict
def finish_displayhook(self):
"""Finish up all displayhook activities."""
sys.stdout.flush()
sys.stderr.flush()
if self.msg['content']['data']:
self.session.send(self.pub_socket, self.msg, ident=self.topic)
self.msg = None
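
To see the plain ZMQDisplayHook in isolation, a minimal sketch with a stand-in session (DummySession is illustrative, not part of this module):

import sys
from ipykernel.displayhook import ZMQDisplayHook

class DummySession(object):
    def send(self, socket, msg_type, content, parent=None, ident=None):
        print(msg_type, content['data']['text/plain'])

hook = ZMQDisplayHook(DummySession(), pub_socket=None)
hook(2)  # prints: execute_result 2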

View file

@@ -0,0 +1,57 @@
"""Simple function for embedding an IPython kernel
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.utils.frame import extract_module_locals
from .kernelapp import IPKernelApp
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def embed_kernel(module=None, local_ns=None, **kwargs):
"""Embed and start an IPython kernel in a given scope.
Parameters
----------
module : ModuleType, optional
The module to load into IPython globals (default: caller)
local_ns : dict, optional
The namespace to load into IPython user namespace (default: caller)
kwargs : various, optional
Further keyword args are relayed to the IPKernelApp constructor,
allowing configuration of the Kernel. Will only have an effect
on the first embed_kernel call for a given process.
"""
# get the app if it exists, or set it up if it doesn't
if IPKernelApp.initialized():
app = IPKernelApp.instance()
else:
app = IPKernelApp.instance(**kwargs)
app.initialize([])
# Undo unnecessary sys module mangling from init_sys_modules.
# This would not be necessary if we could prevent it
# in the first place by using a different InteractiveShell
# subclass, as in the regular embed case.
main = app.kernel.shell._orig_sys_modules_main_mod
if main is not None:
sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main
# load the calling scope if not given
(caller_module, caller_locals) = extract_module_locals(1)
if module is None:
module = caller_module
if local_ns is None:
local_ns = caller_locals
app.kernel.user_module = module
app.kernel.user_ns = local_ns
app.shell.set_completer_frame()
app.start()
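
A usage sketch: dropping a kernel into a running script, then attaching with `jupyter console --existing` from another terminal. embed_kernel blocks until that kernel is shut down:

from ipykernel.embed import embed_kernel

def compute():
    x = 42
    embed_kernel(local_ns=locals())  # attached frontends see x in their namespace

compute()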

View file

@@ -0,0 +1,432 @@
# encoding: utf-8
"""Event loop integration for the ZeroMQ-based kernels."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from functools import partial
import os
import sys
import platform
import zmq
from distutils.version import LooseVersion as V
from traitlets.config.application import Application
def _use_appnope():
"""Should we use appnope for dealing with OS X app nap?
Checks if we are on OS X 10.9 or greater.
"""
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _notify_stream_qt(kernel, stream):
from IPython.external.qt_for_kernel import QtCore
def process_stream_events():
"""fall back to main loop when there's a socket event"""
# call flush to ensure that the stream doesn't lose events
# due to our consuming of the edge-triggered FD
# flush returns the number of events consumed.
# if there were any, wake it up
if stream.flush(limit=1):
notifier.setEnabled(False)
kernel.app.quit()
fd = stream.getsockopt(zmq.FD)
notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
notifier.activated.connect(process_stream_events)
# there may already be unprocessed events waiting.
# these events will not wake zmq's edge-triggered FD
# since edge-triggered notification only occurs on new i/o activity.
# process all the waiting events immediately
# so we start in a clean state ensuring that any new i/o events will notify.
# schedule first call on the eventloop as soon as it's running,
# so we don't block here processing events
timer = QtCore.QTimer(kernel.app)
timer.setSingleShot(True)
timer.timeout.connect(process_stream_events)
timer.start(0)
# mapping of keys to loop functions
loop_map = {
'inline': None,
'nbagg': None,
'notebook': None,
'ipympl': None,
'widget': None,
None: None,
}
def register_integration(*toolkitnames):
"""Decorator to register an event loop to integrate with the IPython kernel
The decorator takes names to register the event loop as for the %gui magic.
You can provide alternative names for the same toolkit.
The decorated function should take a single argument, the IPython kernel
instance, arrange for the event loop to call ``kernel.do_one_iteration()``
at least every ``kernel._poll_interval`` seconds, and start the event loop.
:mod:`ipykernel.eventloops` provides and registers such functions
for a few common event loops.
"""
def decorator(func):
for name in toolkitnames:
loop_map[name] = func
func.exit_hook = lambda kernel: None
def exit_decorator(exit_func):
"""@func.exit is now a decorator
to register a function to be called on exit
"""
func.exit_hook = exit_func
return exit_func
func.exit = exit_decorator
return func
return decorator
def _loop_qt(app):
"""Inner-loop for running the Qt eventloop
Pulled from guisupport.start_event_loop in IPython < 5.2,
since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
rather than if the eventloop is actually running.
"""
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
@register_integration('qt4')
def loop_qt4(kernel):
"""Start a kernel with PyQt4 event loop integration."""
from IPython.lib.guisupport import get_app_qt4
kernel.app = get_app_qt4([" "])
kernel.app.setQuitOnLastWindowClosed(False)
# Only register the eventloop for the shell stream because doing
# it for the control stream is generating a bunch of unnecessary
# warnings on Windows.
_notify_stream_qt(kernel, kernel.shell_streams[0])
_loop_qt(kernel.app)
@register_integration('qt', 'qt5')
def loop_qt5(kernel):
"""Start a kernel with PyQt5 event loop integration."""
os.environ['QT_API'] = 'pyqt5'
return loop_qt4(kernel)
# exit and watch are the same for qt 4 and 5
@loop_qt4.exit
@loop_qt5.exit
def loop_qt_exit(kernel):
kernel.app.exit()
def _loop_wx(app):
"""Inner-loop for running the Wx eventloop
Pulled from guisupport.start_event_loop in IPython < 5.2,
since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
rather than if the eventloop is actually running.
"""
app._in_event_loop = True
app.MainLoop()
app._in_event_loop = False
@register_integration('wx')
def loop_wx(kernel):
"""Start a kernel with wx event loop support."""
import wx
# Wx uses milliseconds
poll_interval = int(1000 * kernel._poll_interval)
def wake():
"""wake from wx"""
for stream in kernel.shell_streams:
if stream.flush(limit=1):
kernel.app.ExitMainLoop()
return
# We have to put the wx.Timer in a wx.Frame for it to fire properly.
# We make the Frame hidden when we create it in the main app below.
class TimerFrame(wx.Frame):
def __init__(self, func):
wx.Frame.__init__(self, None, -1)
self.timer = wx.Timer(self)
# Units for the timer are in milliseconds
self.timer.Start(poll_interval)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.func = func
def on_timer(self, event):
self.func()
# We need a custom wx.App to create our Frame subclass that has the
# wx.Timer to defer back to the tornado event loop.
class IPWxApp(wx.App):
def OnInit(self):
self.frame = TimerFrame(wake)
self.frame.Show(False)
return True
# The redirect=False here makes sure that wx doesn't replace
# sys.stdout/stderr with its own classes.
if not (
getattr(kernel, 'app', None)
and isinstance(kernel.app, wx.App)
):
kernel.app = IPWxApp(redirect=False)
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
import signal
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
_loop_wx(kernel.app)
@loop_wx.exit
def loop_wx_exit(kernel):
import wx
wx.Exit()
@register_integration('tk')
def loop_tk(kernel):
"""Start a kernel with the Tk event loop."""
from tkinter import Tk, READABLE
app = Tk()
# Capability detection:
# per https://docs.python.org/3/library/tkinter.html#file-handlers
# file handlers are not available on Windows
if hasattr(app, 'createfilehandler'):
# A basic wrapper for structural similarity with the Windows version
class BasicAppWrapper(object):
def __init__(self, app):
self.app = app
self.app.withdraw()
def process_stream_events(stream, *a, **kw):
"""fall back to main loop when there's a socket event"""
if stream.flush(limit=1):
app.tk.deletefilehandler(stream.getsockopt(zmq.FD))
app.quit()
# For Tkinter, we create a Tk object and call its withdraw method.
kernel.app_wrapper = BasicAppWrapper(app)
for stream in kernel.shell_streams:
notifier = partial(process_stream_events, stream)
# seems to be needed for tk
notifier.__name__ = "notifier"
app.tk.createfilehandler(stream.getsockopt(zmq.FD), READABLE, notifier)
# schedule initial call after start
app.after(0, notifier)
app.mainloop()
else:
doi = kernel.do_one_iteration
# Tk uses milliseconds
poll_interval = int(1000 * kernel._poll_interval)
class TimedAppWrapper(object):
def __init__(self, app, func):
self.app = app
self.app.withdraw()
self.func = func
def on_timer(self):
self.func()
self.app.after(poll_interval, self.on_timer)
def start(self):
self.on_timer() # Call it once to get things going.
self.app.mainloop()
kernel.app_wrapper = TimedAppWrapper(app, doi)
kernel.app_wrapper.start()
@loop_tk.exit
def loop_tk_exit(kernel):
kernel.app_wrapper.app.destroy()
@register_integration('gtk')
def loop_gtk(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtkembed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
kernel._gtk = gtk_kernel
@loop_gtk.exit
def loop_gtk_exit(kernel):
kernel._gtk.stop()
@register_integration('gtk3')
def loop_gtk3(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtk3embed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
kernel._gtk = gtk_kernel
@loop_gtk3.exit
def loop_gtk3_exit(kernel):
kernel._gtk.stop()
@register_integration('osx')
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
from ._eventloop_macos import mainloop, stop
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
# wake the eventloop when we get a signal
stop()
if etype is KeyboardInterrupt:
print("KeyboardInterrupt caught in CFRunLoop", file=sys.__stdout__)
else:
real_excepthook(etype, value, tb)
while not kernel.shell.exit_now:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
mainloop(kernel._poll_interval)
for stream in kernel.shell_streams:
if stream.flush(limit=1):
# events to process, return control to kernel
return
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
print("KeyboardInterrupt caught in kernel", file=sys.__stdout__)
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook
@loop_cocoa.exit
def loop_cocoa_exit(kernel):
from ._eventloop_macos import stop
stop()
@register_integration('asyncio')
def loop_asyncio(kernel):
'''Start a kernel with asyncio event loop support.'''
import asyncio
loop = asyncio.get_event_loop()
# loop is already running (e.g. tornado 5), nothing left to do
if loop.is_running():
return
if loop.is_closed():
# main loop is closed, create a new one
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop._should_close = False
# pause eventloop when there's an event on a zmq socket
def process_stream_events(stream):
"""fall back to main loop when there's a socket event"""
if stream.flush(limit=1):
loop.stop()
for stream in kernel.shell_streams:
fd = stream.getsockopt(zmq.FD)
notifier = partial(process_stream_events, stream)
loop.add_reader(fd, notifier)
loop.call_soon(notifier)
while True:
error = None
try:
loop.run_forever()
except KeyboardInterrupt:
continue
except Exception as e:
error = e
if loop._should_close:
loop.close()
if error is not None:
raise error
break
@loop_asyncio.exit
def loop_asyncio_exit(kernel):
"""Exit hook for asyncio"""
import asyncio
loop = asyncio.get_event_loop()
@asyncio.coroutine
def close_loop():
if hasattr(loop, 'shutdown_asyncgens'):
yield from loop.shutdown_asyncgens()
loop._should_close = True
loop.stop()
if loop.is_running():
close_loop()
elif not loop.is_closed():
loop.run_until_complete(close_loop)
loop.close()
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
raise ValueError(e)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop
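
The decorator contract documented in register_integration can be exercised with a custom loop. A sketch registering a hypothetical 'noop' integration that just polls the kernel:

import time
from ipykernel.eventloops import register_integration

@register_integration('noop')
def loop_noop(kernel):
    # call do_one_iteration at least every _poll_interval seconds,
    # as the register_integration docstring requires
    while not kernel.shell.exit_now:
        kernel.do_one_iteration()
        time.sleep(kernel._poll_interval)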

View file

@@ -0,0 +1,15 @@
"""GUI support for the IPython ZeroMQ kernel.
This package contains the various toolkit-dependent utilities we use to enable
coordination between the IPython kernel and the event loops of the various GUI
toolkits.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed as part of this
# software.
#-----------------------------------------------------------------------------

View file

@@ -0,0 +1,88 @@
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
# Third-party
import gi
gi.require_version ('Gdk', '3.0')
gi.require_version ('Gtk', '3.0')
from gi.repository import GObject, Gtk
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class GTKEmbed(object):
"""A class to embed a kernel into the GTK main event loop.
"""
def __init__(self, kernel):
self.kernel = kernel
# These two will later store the real gtk functions when we hijack them
self.gtk_main = None
self.gtk_main_quit = None
def start(self):
"""Starts the GTK main event loop and sets our kernel startup routine.
"""
# Register our function to initiate the kernel and start gtk
GObject.idle_add(self._wire_kernel)
Gtk.main()
def _wire_kernel(self):
"""Initializes the kernel inside GTK.
This is meant to run only once at startup, so it does its job and
returns False to ensure it doesn't get run again by GTK.
"""
self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
GObject.timeout_add(int(1000*self.kernel._poll_interval),
self.iterate_kernel)
return False
def iterate_kernel(self):
"""Run one iteration of the kernel and return True.
GTK timer functions must return True to be called again, so we make the
call to :meth:`do_one_iteration` and then return True for GTK.
"""
self.kernel.do_one_iteration()
return True
def stop(self):
# FIXME: this one isn't getting called because we have no reliable
# kernel shutdown. We need to fix that: once the kernel has a
# shutdown mechanism, it can call this.
self.gtk_main_quit()
sys.exit()
def _hijack_gtk(self):
"""Hijack a few key functions in GTK for IPython integration.
Modifies pyGTK's main and main_quit with a dummy so user code does not
block IPython. This allows us to use %run to run arbitrary pygtk
scripts from a long-lived IPython session; when they attempt to
start or stop the GTK main loop, the calls are silently ignored.
Returns
-------
The original functions that have been hijacked:
- Gtk.main
- Gtk.main_quit
"""
def dummy(*args, **kw):
pass
# save and trap main and main_quit from gtk
orig_main, Gtk.main = Gtk.main, dummy
orig_main_quit, Gtk.main_quit = Gtk.main_quit, dummy
return orig_main, orig_main_quit

View file

@@ -0,0 +1,86 @@
"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import sys
# Third-party
import gobject
import gtk
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class GTKEmbed(object):
"""A class to embed a kernel into the GTK main event loop.
"""
def __init__(self, kernel):
self.kernel = kernel
# These two will later store the real gtk functions when we hijack them
self.gtk_main = None
self.gtk_main_quit = None
def start(self):
"""Starts the GTK main event loop and sets our kernel startup routine.
"""
# Register our function to initiate the kernel and start gtk
gobject.idle_add(self._wire_kernel)
gtk.main()
def _wire_kernel(self):
"""Initializes the kernel inside GTK.
This is meant to run only once at startup, so it does its job and
returns False to ensure it doesn't get run again by GTK.
"""
self.gtk_main, self.gtk_main_quit = self._hijack_gtk()
gobject.timeout_add(int(1000*self.kernel._poll_interval),
self.iterate_kernel)
return False
def iterate_kernel(self):
"""Run one iteration of the kernel and return True.
GTK timer functions must return True to be called again, so we make the
call to :meth:`do_one_iteration` and then return True for GTK.
"""
self.kernel.do_one_iteration()
return True
def stop(self):
# FIXME: this one isn't getting called because we have no reliable
# kernel shutdown. We need to fix that: once the kernel has a
# shutdown mechanism, it can call this.
self.gtk_main_quit()
sys.exit()
def _hijack_gtk(self):
"""Hijack a few key functions in GTK for IPython integration.
Modifies pyGTK's main and main_quit with a dummy so user code does not
block IPython. This allows us to use %run to run arbitrary pygtk
scripts from a long-lived IPython session; when they attempt to
start or stop the GTK main loop, the calls are silently ignored.
Returns
-------
The original functions that have been hijacked:
- gtk.main
- gtk.main_quit
"""
def dummy(*args, **kw):
pass
# save and trap main and main_quit from gtk
orig_main, gtk.main = gtk.main, dummy
orig_main_quit, gtk.main_quit = gtk.main_quit, dummy
return orig_main, orig_main_quit

View file

@@ -0,0 +1,120 @@
"""The client and server for a basic ping-pong style heartbeat.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import errno
import os
import socket
from threading import Thread
import zmq
from jupyter_client.localinterfaces import localhost
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class Heartbeat(Thread):
"A simple ping-pong style heartbeat that runs in a thread."
def __init__(self, context, addr=None):
if addr is None:
addr = ('tcp', localhost(), 0)
Thread.__init__(self)
self.context = context
self.transport, self.ip, self.port = addr
self.original_port = self.port
if self.original_port == 0:
self.pick_port()
self.addr = (self.ip, self.port)
self.daemon = True
def pick_port(self):
if self.transport == 'tcp':
s = socket.socket()
# '*' means all interfaces to 0MQ, which is '' to socket.socket
s.bind(('' if self.ip == '*' else self.ip, 0))
self.port = s.getsockname()[1]
s.close()
elif self.transport == 'ipc':
self.port = 1
while os.path.exists("%s-%s" % (self.ip, self.port)):
self.port = self.port + 1
else:
raise ValueError("Unrecognized zmq transport: %s" % self.transport)
return self.port
def _try_bind_socket(self):
c = ':' if self.transport == 'tcp' else '-'
return self.socket.bind('%s://%s' % (self.transport, self.ip) + c + str(self.port))
def _bind_socket(self):
try:
win_in_use = errno.WSAEADDRINUSE
except AttributeError:
win_in_use = None
# Try up to 100 times to bind a port when in conflict to avoid
# infinite attempts in bad setups
max_attempts = 1 if self.original_port else 100
for attempt in range(max_attempts):
try:
self._try_bind_socket()
except zmq.ZMQError as ze:
if attempt == max_attempts - 1:
raise
# Raise if we have any error not related to socket binding
if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
raise
# Port conflict: if a random port was requested, pick a new one and retry
if self.original_port == 0:
self.pick_port()
else:
raise
else:
return
def run(self):
self.socket = self.context.socket(zmq.ROUTER)
self.socket.linger = 1000
try:
self._bind_socket()
except Exception:
self.socket.close()
raise
while True:
try:
zmq.device(zmq.QUEUE, self.socket, self.socket)
except zmq.ZMQError as e:
if e.errno == errno.EINTR:
# signal interrupt, resume heartbeat
continue
elif e.errno == zmq.ETERM:
# context terminated, close socket and exit
try:
self.socket.close()
except zmq.ZMQError:
# suppress further errors during cleanup
# this shouldn't happen, though
pass
break
elif e.errno == zmq.ENOTSOCK:
# socket closed elsewhere, exit
break
else:
raise
else:
break
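
Since the heartbeat is a simple QUEUE device echoing on a ROUTER socket, any REQ client can ping it. A minimal sketch, assuming the default TCP transport on localhost:

import zmq
from ipykernel.heartbeat import Heartbeat

ctx = zmq.Context()
hb = Heartbeat(ctx)  # addr=None picks a free local TCP port
hb.start()

req = ctx.socket(zmq.REQ)
req.connect('tcp://%s:%i' % hb.addr)
req.send(b'ping')
print(req.recv())  # b'ping' echoed back by the device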

View file

@@ -0,0 +1,8 @@
from .channels import (
InProcessChannel,
InProcessHBChannel,
)
from .client import InProcessKernelClient
from .manager import InProcessKernelManager
from .blocking import BlockingInProcessKernelClient

View file

@@ -0,0 +1,93 @@
""" Implements a fully blocking kernel client.
Useful for test suites and blocking terminal interfaces.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
try:
from queue import Queue, Empty # Py 3
except ImportError:
from Queue import Queue, Empty # Py 2
# IPython imports
from traitlets import Type
# Local imports
from .channels import (
InProcessChannel,
)
from .client import InProcessKernelClient
class BlockingInProcessChannel(InProcessChannel):
def __init__(self, *args, **kwds):
super(BlockingInProcessChannel, self).__init__(*args, **kwds)
self._in_queue = Queue()
def call_handlers(self, msg):
self._in_queue.put(msg)
def get_msg(self, block=True, timeout=None):
""" Gets a message if there is one that is ready. """
if timeout is None:
# Queue.get(timeout=None) has stupid uninterruptible
# behavior, so wait for a week instead
timeout = 604800
return self._in_queue.get(block, timeout)
def get_msgs(self):
""" Get all messages that are currently ready. """
msgs = []
while True:
try:
msgs.append(self.get_msg(block=False))
except Empty:
break
return msgs
def msg_ready(self):
""" Is there a message that has been received? """
return not self._in_queue.empty()
class BlockingInProcessStdInChannel(BlockingInProcessChannel):
def call_handlers(self, msg):
""" Overridden for the in-process channel.
This method simply calls raw_input directly.
"""
msg_type = msg['header']['msg_type']
if msg_type == 'input_request':
_raw_input = self.client.kernel._sys_raw_input
prompt = msg['content']['prompt']
print(prompt, end='', file=sys.__stdout__)
sys.__stdout__.flush()
self.client.input(_raw_input())
class BlockingInProcessKernelClient(InProcessKernelClient):
# The classes to use for the various channels.
shell_channel_class = Type(BlockingInProcessChannel)
iopub_channel_class = Type(BlockingInProcessChannel)
stdin_channel_class = Type(BlockingInProcessStdInChannel)
def wait_for_ready(self):
# Wait for kernel info reply on shell channel
while True:
msg = self.shell_channel.get_msg(block=True)
if msg['msg_type'] == 'kernel_info_reply':
self._handle_kernel_info_reply(msg)
break
# Flush IOPub channel
while True:
try:
msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
print(msg['msg_type'])
except Empty:
break

View file

@@ -0,0 +1,97 @@
"""A kernel client for in-process kernels."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from jupyter_client.channelsabc import HBChannelABC
from .socket import DummySocket
#-----------------------------------------------------------------------------
# Channel classes
#-----------------------------------------------------------------------------
class InProcessChannel(object):
"""Base class for in-process channels."""
proxy_methods = []
def __init__(self, client=None):
super(InProcessChannel, self).__init__()
self.client = client
self._is_alive = False
def is_alive(self):
return self._is_alive
def start(self):
self._is_alive = True
def stop(self):
self._is_alive = False
def call_handlers(self, msg):
""" This method is called in the main thread when a message arrives.
Subclasses should override this method to handle incoming messages.
"""
raise NotImplementedError('call_handlers must be defined in a subclass.')
def flush(self, timeout=1.0):
pass
def call_handlers_later(self, *args, **kwds):
""" Call the message handlers later.
The default implementation just calls the handlers immediately, but this
method exists so that GUI toolkits can defer calling the handlers until
after the event loop has run, as expected by GUI frontends.
"""
self.call_handlers(*args, **kwds)
def process_events(self):
""" Process any pending GUI events.
This method will never be called from a frontend without an event
loop (e.g., a terminal frontend).
"""
raise NotImplementedError
class InProcessHBChannel(object):
"""A dummy heartbeat channel interface for in-process kernels.
Normally we use the heartbeat to check that the kernel process is alive.
When the kernel is in-process, that doesn't make sense, but clients still
expect this interface.
"""
time_to_dead = 3.0
def __init__(self, client=None):
super(InProcessHBChannel, self).__init__()
self.client = client
self._is_alive = False
self._pause = True
def is_alive(self):
return self._is_alive
def start(self):
self._is_alive = True
def stop(self):
self._is_alive = False
def pause(self):
self._pause = True
def unpause(self):
self._pause = False
def is_beating(self):
return not self._pause
HBChannelABC.register(InProcessHBChannel)

View file

@@ -0,0 +1,187 @@
"""A client for in-process kernels."""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# IPython imports
from ipykernel.inprocess.socket import DummySocket
from traitlets import Type, Instance, default
from jupyter_client.clientabc import KernelClientABC
from jupyter_client.client import KernelClient
# Local imports
from .channels import (
InProcessChannel,
InProcessHBChannel,
)
#-----------------------------------------------------------------------------
# Main kernel Client class
#-----------------------------------------------------------------------------
class InProcessKernelClient(KernelClient):
"""A client for an in-process kernel.
This class implements the interface of
`jupyter_client.clientabc.KernelClientABC` and allows
(asynchronous) frontends to be used seamlessly with an in-process kernel.
See `jupyter_client.client.KernelClient` for docstrings.
"""
# The classes to use for the various channels.
shell_channel_class = Type(InProcessChannel)
iopub_channel_class = Type(InProcessChannel)
stdin_channel_class = Type(InProcessChannel)
control_channel_class = Type(InProcessChannel)
hb_channel_class = Type(InProcessHBChannel)
kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
allow_none=True)
#--------------------------------------------------------------------------
# Channel management methods
#--------------------------------------------------------------------------
@default('blocking_class')
def _default_blocking_class(self):
from .blocking import BlockingInProcessKernelClient
return BlockingInProcessKernelClient
def get_connection_info(self):
d = super(InProcessKernelClient, self).get_connection_info()
d['kernel'] = self.kernel
return d
def start_channels(self, *args, **kwargs):
super(InProcessKernelClient, self).start_channels()
self.kernel.frontends.append(self)
@property
def shell_channel(self):
if self._shell_channel is None:
self._shell_channel = self.shell_channel_class(self)
return self._shell_channel
@property
def iopub_channel(self):
if self._iopub_channel is None:
self._iopub_channel = self.iopub_channel_class(self)
return self._iopub_channel
@property
def stdin_channel(self):
if self._stdin_channel is None:
self._stdin_channel = self.stdin_channel_class(self)
return self._stdin_channel
@property
def control_channel(self):
if self._control_channel is None:
self._control_channel = self.control_channel_class(self)
return self._control_channel
@property
def hb_channel(self):
if self._hb_channel is None:
self._hb_channel = self.hb_channel_class(self)
return self._hb_channel
# Methods for sending specific messages
# -------------------------------------
def execute(self, code, silent=False, store_history=True,
user_expressions={}, allow_stdin=None):
if allow_stdin is None:
allow_stdin = self.allow_stdin
content = dict(code=code, silent=silent, store_history=store_history,
user_expressions=user_expressions,
allow_stdin=allow_stdin)
msg = self.session.msg('execute_request', content)
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def complete(self, code, cursor_pos=None):
if cursor_pos is None:
cursor_pos = len(code)
content = dict(code=code, cursor_pos=cursor_pos)
msg = self.session.msg('complete_request', content)
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def inspect(self, code, cursor_pos=None, detail_level=0):
if cursor_pos is None:
cursor_pos = len(code)
content = dict(code=code, cursor_pos=cursor_pos,
detail_level=detail_level,
)
msg = self.session.msg('inspect_request', content)
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def history(self, raw=True, output=False, hist_access_type='range', **kwds):
content = dict(raw=raw, output=output,
hist_access_type=hist_access_type, **kwds)
msg = self.session.msg('history_request', content)
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def shutdown(self, restart=False):
# FIXME: What to do here?
raise NotImplementedError('Cannot shutdown in-process kernel')
def kernel_info(self):
"""Request kernel info."""
msg = self.session.msg('kernel_info_request')
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def comm_info(self, target_name=None):
"""Request a dictionary of valid comms and their targets."""
if target_name is None:
content = {}
else:
content = dict(target_name=target_name)
msg = self.session.msg('comm_info_request', content)
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def input(self, string):
if self.kernel is None:
raise RuntimeError('Cannot send input reply. No kernel exists.')
self.kernel.raw_input_str = string
def is_complete(self, code):
msg = self.session.msg('is_complete_request', {'code': code})
self._dispatch_to_kernel(msg)
return msg['header']['msg_id']
def _dispatch_to_kernel(self, msg):
""" Send a message to the kernel and handle a reply.
"""
kernel = self.kernel
if kernel is None:
raise RuntimeError('Cannot send request. No kernel exists.')
stream = DummySocket()
self.session.send(stream, msg)
msg_parts = stream.recv_multipart()
kernel.dispatch_shell(stream, msg_parts)
idents, reply_msg = self.session.recv(stream, copy=False)
self.shell_channel.call_handlers_later(reply_msg)
#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------
KernelClientABC.register(InProcessKernelClient)
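
Putting the pieces together, an in-process kernel is typically driven through the manager and the blocking client. A sketch, assuming the package's InProcessKernelManager wires its client class to the blocking client:

from ipykernel.inprocess import InProcessKernelManager

km = InProcessKernelManager()
km.start_kernel()
client = km.client()
client.start_channels()
client.wait_for_ready()
client.execute('x = 1 + 1')  # runs in this same process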

View file

@@ -0,0 +1,8 @@
"""Shared constants.
"""
# Because inprocess communication is not networked, we can use a common Session
# key everywhere. This is not just the empty bytestring to avoid tripping
certain security checks in the rest of Jupyter that assume that empty keys
# are insecure.
INPROCESS_KEY = b'inprocess'

View file

@@ -0,0 +1,192 @@
"""An in-process kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
import logging
import sys
from IPython.core.interactiveshell import InteractiveShellABC
from ipykernel.jsonutil import json_clean
from traitlets import Any, Enum, Instance, List, Type, default
from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from .constants import INPROCESS_KEY
from .socket import DummySocket
from ..iostream import OutStream, BackgroundSocket, IOPubThread
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class InProcessKernel(IPythonKernel):
#-------------------------------------------------------------------------
# InProcessKernel interface
#-------------------------------------------------------------------------
# The frontends connected to this kernel.
frontends = List(
Instance('ipykernel.inprocess.client.InProcessKernelClient',
allow_none=True)
)
# The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation of the kernel, but is required for
# IPython's GUI support (including pylab). The default is 'inline' because
# it is safe under all GUI toolkits.
gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
default_value='inline')
raw_input_str = Any()
stdout = Any()
stderr = Any()
#-------------------------------------------------------------------------
# Kernel interface
#-------------------------------------------------------------------------
shell_class = Type(allow_none=True)
shell_streams = List()
control_stream = Any()
_underlying_iopub_socket = Instance(DummySocket, ())
iopub_thread = Instance(IOPubThread)
@default('iopub_thread')
def _default_iopub_thread(self):
thread = IOPubThread(self._underlying_iopub_socket)
thread.start()
return thread
iopub_socket = Instance(BackgroundSocket)
@default('iopub_socket')
def _default_iopub_socket(self):
return self.iopub_thread.background_socket
stdin_socket = Instance(DummySocket, ())
def __init__(self, **traits):
super(InProcessKernel, self).__init__(**traits)
self._underlying_iopub_socket.observe(self._io_dispatch, names=['message_sent'])
self.shell.kernel = self
def execute_request(self, stream, ident, parent):
""" Override for temporary IO redirection. """
with self._redirected_io():
super(InProcessKernel, self).execute_request(stream, ident, parent)
def start(self):
""" Override registration of dispatchers for streams. """
self.shell.exit_now = False
def _abort_queues(self):
""" The in-process kernel doesn't abort requests. """
pass
def _input_request(self, prompt, ident, parent, password=False):
# Flush output before making the request.
self.raw_input_str = None
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = json_clean(dict(prompt=prompt, password=password))
msg = self.session.msg(u'input_request', content, parent)
for frontend in self.frontends:
if frontend.session.session == parent['header']['session']:
frontend.stdin_channel.call_handlers(msg)
break
else:
logging.error('No frontend found for raw_input request')
return str()
# Await a response.
while self.raw_input_str is None:
frontend.stdin_channel.process_events()
return self.raw_input_str
#-------------------------------------------------------------------------
# Protected interface
#-------------------------------------------------------------------------
@contextmanager
def _redirected_io(self):
""" Temporarily redirect IO to the kernel.
"""
sys_stdout, sys_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.stdout, self.stderr
yield
sys.stdout, sys.stderr = sys_stdout, sys_stderr
#------ Trait change handlers --------------------------------------------
def _io_dispatch(self, change):
""" Called when a message is sent to the IO socket.
"""
ident, msg = self.session.recv(self.iopub_socket, copy=False)
for frontend in self.frontends:
frontend.iopub_channel.call_handlers(msg)
#------ Trait initializers -----------------------------------------------
@default('log')
def _default_log(self):
return logging.getLogger(__name__)
@default('session')
def _default_session(self):
from jupyter_client.session import Session
return Session(parent=self, key=INPROCESS_KEY)
@default('shell_class')
def _default_shell_class(self):
return InProcessInteractiveShell
@default('stdout')
def _default_stdout(self):
return OutStream(self.session, self.iopub_thread, u'stdout')
@default('stderr')
def _default_stderr(self):
return OutStream(self.session, self.iopub_thread, u'stderr')
#-----------------------------------------------------------------------------
# Interactive shell subclass
#-----------------------------------------------------------------------------
class InProcessInteractiveShell(ZMQInteractiveShell):
kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
allow_none=True)
#-------------------------------------------------------------------------
# InteractiveShell interface
#-------------------------------------------------------------------------
def enable_gui(self, gui=None):
"""Enable GUI integration for the kernel."""
from ipykernel.eventloops import enable_gui
if not gui:
gui = self.kernel.gui
enable_gui(gui, kernel=self.kernel)
self.active_eventloop = gui
def enable_matplotlib(self, gui=None):
"""Enable matplotlib integration for the kernel."""
if not gui:
gui = self.kernel.gui
return super(InProcessInteractiveShell, self).enable_matplotlib(gui)
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime."""
if not gui:
gui = self.kernel.gui
return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all,
welcome_message)
InteractiveShellABC.register(InProcessInteractiveShell)
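# Standalone sketch (illustrative) of the redirection pattern used by
# _redirected_io above; the hypothetical helper adds a try/finally so the
# original stream is restored even if the body raises.
import sys
from contextlib import contextmanager
from io import StringIO

@contextmanager
def _redirected(stdout):
    saved = sys.stdout
    sys.stdout = stdout
    try:
        yield
    finally:
        sys.stdout = saved

buf = StringIO()
with _redirected(buf):
    print("captured")
assert buf.getvalue() == "captured\n"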

View file

@@ -0,0 +1,81 @@
"""A kernel manager for in-process kernels."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Instance, DottedObjectName, default
from jupyter_client.managerabc import KernelManagerABC
from jupyter_client.manager import KernelManager
from jupyter_client.session import Session
from .constants import INPROCESS_KEY
class InProcessKernelManager(KernelManager):
"""A manager for an in-process kernel.
This class implements the interface of
    `jupyter_client.managerabc.KernelManagerABC` and allows
    (asynchronous) frontends to be used seamlessly with an in-process kernel.
    See `jupyter_client.manager.KernelManager` for docstrings.
"""
# The kernel process with which the KernelManager is communicating.
kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
allow_none=True)
# the client class for KM.client() shortcut
client_class = DottedObjectName('ipykernel.inprocess.BlockingInProcessKernelClient')
@default('blocking_class')
def _default_blocking_class(self):
from .blocking import BlockingInProcessKernelClient
return BlockingInProcessKernelClient
@default('session')
def _default_session(self):
# don't sign in-process messages
return Session(key=INPROCESS_KEY, parent=self)
#--------------------------------------------------------------------------
# Kernel management methods
#--------------------------------------------------------------------------
def start_kernel(self, **kwds):
from ipykernel.inprocess.ipkernel import InProcessKernel
self.kernel = InProcessKernel(parent=self, session=self.session)
def shutdown_kernel(self):
self.kernel.iopub_thread.stop()
self._kill_kernel()
def restart_kernel(self, now=False, **kwds):
self.shutdown_kernel()
self.start_kernel(**kwds)
@property
def has_kernel(self):
return self.kernel is not None
def _kill_kernel(self):
self.kernel = None
def interrupt_kernel(self):
raise NotImplementedError("Cannot interrupt in-process kernel.")
def signal_kernel(self, signum):
raise NotImplementedError("Cannot signal in-process kernel.")
def is_alive(self):
return self.kernel is not None
def client(self, **kwargs):
kwargs['kernel'] = self.kernel
return super(InProcessKernelManager, self).client(**kwargs)
#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------
KernelManagerABC.register(InProcessKernelManager)
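# Lifecycle sketch (illustrative): the manager above swaps a kernel object in
# and out of `self.kernel` instead of managing a subprocess.
km = InProcessKernelManager()
km.start_kernel()
assert km.has_kernel
km.restart_kernel()       # shutdown_kernel() followed by a fresh start_kernel()
km.shutdown_kernel()      # stops the iopub thread and drops the kernel
assert not km.has_kernel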

View file

@@ -0,0 +1,68 @@
""" Defines a dummy socket implementing (part of) the zmq.Socket interface. """
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import abc
import warnings
try:
from queue import Queue # Py 3
except ImportError:
from Queue import Queue # Py 2
import zmq
from traitlets import HasTraits, Instance, Int
from ipython_genutils.py3compat import with_metaclass
#-----------------------------------------------------------------------------
# Generic socket interface
#-----------------------------------------------------------------------------
class SocketABC(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def recv_multipart(self, flags=0, copy=True, track=False):
raise NotImplementedError
@abc.abstractmethod
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
raise NotImplementedError
@classmethod
def register(cls, other_cls):
if other_cls is not DummySocket:
warnings.warn("SocketABC is deprecated since ipykernel version 4.5.0.",
DeprecationWarning, stacklevel=2)
abc.ABCMeta.register(cls, other_cls)
#-----------------------------------------------------------------------------
# Dummy socket class
#-----------------------------------------------------------------------------
class DummySocket(HasTraits):
""" A dummy socket implementing (part of) the zmq.Socket interface. """
queue = Instance(Queue, ())
message_sent = Int(0) # Should be an Event
context = Instance(zmq.Context)
def _context_default(self):
return zmq.Context()
#-------------------------------------------------------------------------
# Socket interface
#-------------------------------------------------------------------------
def recv_multipart(self, flags=0, copy=True, track=False):
return self.queue.get_nowait()
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
msg_parts = list(map(zmq.Message, msg_parts))
self.queue.put_nowait(msg_parts)
self.message_sent += 1
def flush(self, timeout=1.0):
"""no-op to comply with stream API"""
pass
SocketABC.register(DummySocket)
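# Round-trip sketch (illustrative): frames "sent" on a DummySocket are queued
# locally and come back from recv_multipart as zmq.Message objects.
sock = DummySocket()
sock.send_multipart([b'hello', b'world'])
frames = sock.recv_multipart()
assert [f.bytes for f in frames] == [b'hello', b'world']
assert sock.message_sent == 1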

View file

@@ -0,0 +1,110 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import sys
import unittest
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.tests.utils import assemble_output
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
from ipython_genutils import py3compat
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
def _init_asyncio_patch():
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
    Do this as early as possible so it has low priority and can be overridden.
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
class InProcessKernelTestCase(unittest.TestCase):
def setUp(self):
_init_asyncio_patch()
self.km = InProcessKernelManager()
self.km.start_kernel()
self.kc = self.km.client()
self.kc.start_channels()
self.kc.wait_for_ready()
@skipif_not_matplotlib
def test_pylab(self):
"""Does %pylab work in the in-process kernel?"""
kc = self.kc
kc.execute('%pylab')
out, err = assemble_output(kc.iopub_channel)
self.assertIn('matplotlib', out)
def test_raw_input(self):
""" Does the in-process kernel handle raw_input correctly?
"""
io = StringIO('foobar\n')
sys_stdin = sys.stdin
sys.stdin = io
try:
if py3compat.PY3:
self.kc.execute('x = input()')
else:
self.kc.execute('x = raw_input()')
finally:
sys.stdin = sys_stdin
assert self.km.kernel.shell.user_ns.get('x') == 'foobar'
def test_stdout(self):
""" Does the in-process kernel correctly capture IO?
"""
kernel = InProcessKernel()
with capture_output() as io:
kernel.shell.run_cell('print("foo")')
assert io.stdout == 'foo\n'
kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
kernel.frontends.append(kc)
kc.execute('print("bar")')
out, err = assemble_output(kc.iopub_channel)
assert out == 'bar\n'
def test_getpass_stream(self):
"Tests that kernel getpass accept the stream parameter"
kernel = InProcessKernel()
kernel._allow_stdin = True
kernel._input_request = lambda *args, **kwargs : None
kernel.getpass(stream='non empty')

View file

@@ -0,0 +1,115 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import unittest
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
#-----------------------------------------------------------------------------
# Test case
#-----------------------------------------------------------------------------
class InProcessKernelManagerTestCase(unittest.TestCase):
def setUp(self):
self.km = InProcessKernelManager()
def tearDown(self):
if self.km.has_kernel:
self.km.shutdown_kernel()
def test_interface(self):
""" Does the in-process kernel manager implement the basic KM interface?
"""
km = self.km
assert not km.has_kernel
km.start_kernel()
assert km.has_kernel
assert km.kernel is not None
kc = km.client()
assert not kc.channels_running
kc.start_channels()
assert kc.channels_running
old_kernel = km.kernel
km.restart_kernel()
self.assertIsNotNone(km.kernel)
assert km.kernel != old_kernel
km.shutdown_kernel()
assert not km.has_kernel
self.assertRaises(NotImplementedError, km.interrupt_kernel)
self.assertRaises(NotImplementedError, km.signal_kernel, 9)
kc.stop_channels()
assert not kc.channels_running
def test_execute(self):
""" Does executing code in an in-process kernel work?
"""
km = self.km
km.start_kernel()
kc = km.client()
kc.start_channels()
kc.wait_for_ready()
kc.execute('foo = 1')
assert km.kernel.shell.user_ns['foo'] == 1
def test_complete(self):
""" Does requesting completion from an in-process kernel work?
"""
km = self.km
km.start_kernel()
kc = km.client()
kc.start_channels()
kc.wait_for_ready()
km.kernel.shell.push({'my_bar': 0, 'my_baz': 1})
kc.complete('my_ba', 5)
msg = kc.get_shell_msg()
assert msg['header']['msg_type'] == 'complete_reply'
self.assertEqual(sorted(msg['content']['matches']),
['my_bar', 'my_baz'])
def test_inspect(self):
""" Does requesting object information from an in-process kernel work?
"""
km = self.km
km.start_kernel()
kc = km.client()
kc.start_channels()
kc.wait_for_ready()
km.kernel.shell.user_ns['foo'] = 1
kc.inspect('foo')
msg = kc.get_shell_msg()
assert msg['header']['msg_type'] == 'inspect_reply'
content = msg['content']
assert content['found']
text = content['data']['text/plain']
self.assertIn('int', text)
def test_history(self):
""" Does requesting history from an in-process kernel work?
"""
km = self.km
km.start_kernel()
kc = km.client()
kc.start_channels()
kc.wait_for_ready()
kc.execute('1')
kc.history(hist_access_type='tail', n=1)
msg = kc.shell_channel.get_msgs()[-1]
assert msg['header']['msg_type'] == 'history_reply'
history = msg['content']['history']
assert len(history) == 1
assert history[0][2] == '1'
if __name__ == '__main__':
unittest.main()

View file

@@ -0,0 +1,440 @@
# coding: utf-8
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
from binascii import b2a_hex
from collections import deque
try:
from importlib import lock_held as import_lock_held
except ImportError:
from imp import lock_held as import_lock_held
import os
import sys
import threading
import warnings
from io import StringIO, TextIOBase
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
"""An object for sending IOPub messages in a background thread
Prevents a blocking main thread from delaying output from threads.
IOPubThread(pub_socket).background_socket is a Socket-API-providing object
whose IO is always run in a thread.
"""
def __init__(self, socket, pipe=False):
"""Create IOPub thread
Parameters
----------
socket: zmq.PUB Socket
the socket on which messages will be sent.
pipe: bool
Whether this process should listen for IOPub messages
piped from subprocesses.
"""
self.socket = socket
self.background_socket = BackgroundSocket(self)
self._master_pid = os.getpid()
self._pipe_flag = pipe
self.io_loop = IOLoop(make_current=False)
if pipe:
self._setup_pipe_in()
self._local = threading.local()
self._events = deque()
self._setup_event_pipe()
self.thread = threading.Thread(target=self._thread_main)
self.thread.daemon = True
def _thread_main(self):
"""The inner loop that's actually run in a thread"""
self.io_loop.make_current()
self.io_loop.start()
self.io_loop.close(all_fds=True)
def _setup_event_pipe(self):
"""Create the PULL socket listening for events that should fire in this thread."""
ctx = self.socket.context
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
_uuid = b2a_hex(os.urandom(16)).decode('ascii')
iface = self._event_interface = 'inproc://%s' % _uuid
pipe_in.bind(iface)
self._event_puller = ZMQStream(pipe_in, self.io_loop)
self._event_puller.on_recv(self._handle_event)
@property
def _event_pipe(self):
"""thread-local event pipe for signaling events that should be processed in the thread"""
try:
event_pipe = self._local.event_pipe
except AttributeError:
# new thread, new event pipe
ctx = self.socket.context
event_pipe = ctx.socket(zmq.PUSH)
event_pipe.linger = 0
event_pipe.connect(self._event_interface)
self._local.event_pipe = event_pipe
return event_pipe
def _handle_event(self, msg):
"""Handle an event on the event pipe
Content of the message is ignored.
Whenever *an* event arrives on the event stream,
*all* waiting events are processed in order.
"""
# freeze event count so new writes don't extend the queue
# while we are processing
n_events = len(self._events)
for i in range(n_events):
event_f = self._events.popleft()
event_f()
def _setup_pipe_in(self):
"""setup listening pipe for IOPub from forked subprocesses"""
ctx = self.socket.context
# use UUID to authenticate pipe messages
self._pipe_uuid = os.urandom(16)
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
try:
self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
except zmq.ZMQError as e:
warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
"\nsubprocess output will be unavailable."
)
self._pipe_flag = False
pipe_in.close()
return
self._pipe_in = ZMQStream(pipe_in, self.io_loop)
self._pipe_in.on_recv(self._handle_pipe_msg)
def _handle_pipe_msg(self, msg):
"""handle a pipe message from a subprocess"""
if not self._pipe_flag or not self._is_master_process():
return
if msg[0] != self._pipe_uuid:
print("Bad pipe message: %s", msg, file=sys.__stderr__)
return
self.send_multipart(msg[1:])
def _setup_pipe_out(self):
# must be new context after fork
ctx = zmq.Context()
pipe_out = ctx.socket(zmq.PUSH)
pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
return ctx, pipe_out
def _is_master_process(self):
return os.getpid() == self._master_pid
def _check_mp_mode(self):
"""check for forks, and switch to zmq pipeline if necessary"""
if not self._pipe_flag or self._is_master_process():
return MASTER
else:
return CHILD
def start(self):
"""Start the IOPub thread"""
self.thread.start()
# make sure we don't prevent process exit
# I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
atexit.register(self.stop)
def stop(self):
"""Stop the IOPub thread"""
if not self.thread.is_alive():
return
self.io_loop.add_callback(self.io_loop.stop)
self.thread.join()
if hasattr(self._local, 'event_pipe'):
self._local.event_pipe.close()
def close(self):
if self.closed:
return
self.socket.close()
self.socket = None
@property
def closed(self):
return self.socket is None
def schedule(self, f):
"""Schedule a function to be called in our IO thread.
If the thread is not running, call immediately.
"""
if self.thread.is_alive():
self._events.append(f)
# wake event thread (message content is ignored)
self._event_pipe.send(b'')
else:
f()
def send_multipart(self, *args, **kwargs):
"""send_multipart schedules actual zmq send in my thread.
If my thread isn't running (e.g. forked process), send immediately.
"""
self.schedule(lambda : self._really_send(*args, **kwargs))
def _really_send(self, msg, *args, **kwargs):
"""The callback that actually sends messages"""
mp_mode = self._check_mp_mode()
if mp_mode != CHILD:
# we are master, do a regular send
self.socket.send_multipart(msg, *args, **kwargs)
else:
# we are a child, pipe to master
# new context/socket for every pipe-out
# since forks don't teardown politely, use ctx.term to ensure send has completed
ctx, pipe_out = self._setup_pipe_out()
pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
pipe_out.close()
ctx.term()
class BackgroundSocket(object):
"""Wrapper around IOPub thread that provides zmq send[_multipart]"""
io_thread = None
def __init__(self, io_thread):
self.io_thread = io_thread
def __getattr__(self, attr):
"""Wrap socket attr access for backward-compatibility"""
        if attr.startswith('__') and attr.endswith('__'):
            # don't wrap magic methods
            raise AttributeError(attr)
        if hasattr(self.io_thread.socket, attr):
            warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
                          DeprecationWarning, stacklevel=2)
            return getattr(self.io_thread.socket, attr)
        raise AttributeError(attr)
def __setattr__(self, attr, value):
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
super(BackgroundSocket, self).__setattr__(attr, value)
else:
warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
DeprecationWarning, stacklevel=2)
setattr(self.io_thread.socket, attr, value)
def send(self, msg, *args, **kwargs):
return self.send_multipart([msg], *args, **kwargs)
def send_multipart(self, *args, **kwargs):
"""Schedule send in IO thread"""
return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
"""A file like object that publishes the stream to a 0MQ PUB socket.
Output is handed off to an IO Thread
"""
# timeout for flush to avoid infinite hang
# in case of misbehavior
flush_timeout = 10
# The time interval between automatic flushes, in seconds.
flush_interval = 0.2
topic = None
encoding = 'UTF-8'
def __init__(self, session, pub_thread, name, pipe=None, echo=None):
if pipe is not None:
warnings.warn("pipe argument to OutStream is deprecated and ignored",
DeprecationWarning)
# This is necessary for compatibility with Python built-in streams
self.session = session
if not isinstance(pub_thread, IOPubThread):
# Backward-compat: given socket, not thread. Wrap in a thread.
warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
DeprecationWarning, stacklevel=2)
pub_thread = IOPubThread(pub_thread)
pub_thread.start()
self.pub_thread = pub_thread
self.name = name
self.topic = b'stream.' + py3compat.cast_bytes(name)
self.parent_header = {}
self._master_pid = os.getpid()
self._flush_pending = False
self._subprocess_flush_pending = False
self._io_loop = pub_thread.io_loop
self._new_buffer()
self.echo = None
if echo:
if hasattr(echo, 'read') and hasattr(echo, 'write'):
self.echo = echo
else:
raise ValueError("echo argument must be a file like object")
def _is_master_process(self):
return os.getpid() == self._master_pid
def set_parent(self, parent):
self.parent_header = extract_header(parent)
def close(self):
self.pub_thread = None
@property
def closed(self):
return self.pub_thread is None
def _schedule_flush(self):
"""schedule a flush in the IO thread
call this on write, to indicate that flush should be called soon.
"""
if self._flush_pending:
return
self._flush_pending = True
# add_timeout has to be handed to the io thread via event pipe
def _schedule_in_thread():
self._io_loop.call_later(self.flush_interval, self._flush)
self.pub_thread.schedule(_schedule_in_thread)
def flush(self):
"""trigger actual zmq send
send will happen in the background thread
"""
if self.pub_thread and self.pub_thread.thread is not None and self.pub_thread.thread.is_alive():
# request flush on the background thread
self.pub_thread.schedule(self._flush)
# wait for flush to actually get through, if we can.
# waiting across threads during import can cause deadlocks
# so only wait if import lock is not held
if not import_lock_held():
evt = threading.Event()
self.pub_thread.schedule(evt.set)
                # and give a timeout to avoid blocking forever if the IO thread is stuck
if not evt.wait(self.flush_timeout):
# write directly to __stderr__ instead of warning because
# if this is happening sys.stderr may be the problem.
print("IOStream.flush timed out", file=sys.__stderr__)
else:
self._flush()
def _flush(self):
"""This is where the actual send happens.
_flush should generally be called in the IO thread,
unless the thread has been destroyed (e.g. forked subprocess).
"""
self._flush_pending = False
self._subprocess_flush_pending = False
if self.echo is not None:
try:
self.echo.flush()
except OSError as e:
if self.echo is not sys.__stderr__:
print("Flush failed: {}".format(e),
file=sys.__stderr__)
data = self._flush_buffer()
if data:
# FIXME: this disables Session's fork-safe check,
# since pub_thread is itself fork-safe.
# There should be a better way to do this.
self.session.pid = os.getpid()
content = {u'name':self.name, u'text':data}
self.session.send(self.pub_thread, u'stream', content=content,
parent=self.parent_header, ident=self.topic)
def write(self, string):
if self.echo is not None:
try:
self.echo.write(string)
except OSError as e:
if self.echo is not sys.__stderr__:
print("Write failed: {}".format(e),
file=sys.__stderr__)
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
# Make sure that we're handling unicode
if not isinstance(string, unicode_type):
string = string.decode(self.encoding, 'replace')
is_child = (not self._is_master_process())
# only touch the buffer in the IO thread to avoid races
self.pub_thread.schedule(lambda : self._buffer.write(string))
if is_child:
# mp.Pool cannot be trusted to flush promptly (or ever),
# and this helps.
if self._subprocess_flush_pending:
return
self._subprocess_flush_pending = True
                # We cannot rely on self._io_loop.call_later from a subprocess
self.pub_thread.schedule(self._flush)
else:
self._schedule_flush()
def writelines(self, sequence):
if self.pub_thread is None:
raise ValueError('I/O operation on closed file')
else:
for string in sequence:
self.write(string)
def writable(self):
return True
def _flush_buffer(self):
"""clear the current buffer and return the current buffer data.
This should only be called in the IO thread.
"""
data = u''
if self._buffer is not None:
buf = self._buffer
self._new_buffer()
data = buf.getvalue()
buf.close()
return data
def _new_buffer(self):
self._buffer = StringIO()
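# Wiring sketch (illustrative, not part of this file): an OutStream publishes
# through an IOPubThread wrapped around a real zmq PUB socket.
import zmq
from jupyter_client.session import Session

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind_to_random_port('tcp://127.0.0.1')
thread = IOPubThread(pub)
thread.start()
stream = OutStream(Session(), thread, 'stdout')
stream.write(u'hello\n')   # buffered; the zmq send happens on the IO thread
stream.flush()
thread.stop()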

View file

@@ -0,0 +1,537 @@
"""The IPython kernel implementation"""
import asyncio
from contextlib import contextmanager
from functools import partial
import getpass
import signal
import sys
from IPython.core import release
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from tornado import gen
from traitlets import Instance, Type, Any, List, Bool
from .comm import CommManager
from .kernelbase import Kernel as KernelBase
from .zmqshell import ZMQInteractiveShell
from .eventloops import _use_appnope
try:
from IPython.core.interactiveshell import _asyncio_runner
except ImportError:
_asyncio_runner = None
try:
from IPython.core.completer import rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter
_use_experimental_60_completion = True
except ImportError:
_use_experimental_60_completion = False
_EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental'
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
shell_class = Type(ZMQInteractiveShell)
use_experimental_completions = Bool(True,
help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
).tag(config=True)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.comm_manager = CommManager(parent=self, kernel=self)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
if _use_appnope() and self._darwin_app_nap:
            # Disable app-nap: the kernel is not a GUI app itself, but it may host GUI event loops
import appnope
appnope.nope()
help_links = List([
{
'text': "Python Reference",
'url': "https://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython Reference",
'url': "https://ipython.org/documentation.html",
},
{
'text': "NumPy Reference",
'url': "https://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy Reference",
'url': "https://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib Reference",
'url': "https://matplotlib.org/contents.html",
},
{
'text': "SymPy Reference",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas Reference",
'url': "https://pandas.pydata.org/pandas-docs/stable/",
},
]).tag(config=True)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def init_metadata(self, parent):
"""Initialize metadata.
Run at the beginning of each execution request.
"""
md = super(IPythonKernel, self).init_metadata(parent)
# FIXME: remove deprecated ipyparallel-specific code
# This is required for ipyparallel < 5.0
md.update({
'dependencies_met' : True,
'engine' : self.ident,
})
return md
def finish_metadata(self, parent, metadata, reply_content):
"""Finish populating metadata.
Run after completing an execution request.
"""
# FIXME: remove deprecated ipyparallel-specific code
# This is required by ipyparallel < 5.0
metadata['status'] = reply_content['status']
if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency':
metadata['dependencies_met'] = False
return metadata
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
@contextmanager
def _cancel_on_sigint(self, future):
"""ContextManager for capturing SIGINT and cancelling a future
SIGINT raises in the event loop when running async code,
but we want it to halt a coroutine.
Ideally, it would raise KeyboardInterrupt,
but this turns it into a CancelledError.
At least it gets a decent traceback to the user.
"""
sigint_future = asyncio.Future()
# whichever future finishes first,
# cancel the other one
def cancel_unless_done(f, _ignored):
if f.cancelled() or f.done():
return
f.cancel()
# when sigint finishes,
# abort the coroutine with CancelledError
sigint_future.add_done_callback(
partial(cancel_unless_done, future)
)
# when the main future finishes,
# stop watching for SIGINT events
future.add_done_callback(
partial(cancel_unless_done, sigint_future)
)
def handle_sigint(*args):
def set_sigint_result():
if sigint_future.cancelled() or sigint_future.done():
return
sigint_future.set_result(1)
# use add_callback for thread safety
self.io_loop.add_callback(set_sigint_result)
        # set the custom sigint handler during this context
save_sigint = signal.signal(signal.SIGINT, handle_sigint)
try:
yield
finally:
# restore the previous sigint handler
signal.signal(signal.SIGINT, save_sigint)
@gen.coroutine
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
if hasattr(shell, 'run_cell_async') and hasattr(shell, 'should_run_async'):
run_cell = shell.run_cell_async
should_run_async = shell.should_run_async
else:
should_run_async = lambda cell: False
# older IPython,
# use blocking run_cell and wrap it in coroutine
@gen.coroutine
def run_cell(*args, **kwargs):
return shell.run_cell(*args, **kwargs)
try:
# default case: runner is asyncio and asyncio is already running
# TODO: this should check every case for "are we inside the runner",
# not just asyncio
if (
_asyncio_runner
and should_run_async(code)
and shell.loop_runner is _asyncio_runner
and asyncio.get_event_loop().is_running()
):
coro = run_cell(code, store_history=store_history, silent=silent)
coro_future = asyncio.ensure_future(coro)
with self._cancel_on_sigint(coro_future):
res = None
try:
res = yield coro_future
finally:
shell.events.trigger('post_execute')
if not silent:
shell.events.trigger('post_run_cell', res)
else:
# runner isn't already running,
# make synchronous call,
# letting shell dispatch to loop runners
res = shell.run_cell(code, store_history=store_history, silent=silent)
finally:
self._restore_input()
if res.error_before_exec is not None:
err = res.error_before_exec
else:
err = res.error_in_exec
if res.success:
reply_content[u'status'] = u'ok'
else:
reply_content[u'status'] = u'error'
reply_content.update({
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(err).__name__),
u'evalue': safe_unicode(err),
})
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
method='execute')
reply_content['engine_info'] = e_info
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and always clear the payload system.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
if _use_experimental_60_completion and self.use_experimental_completions:
return self._experimental_do_complete(code, cursor_pos)
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def _experimental_do_complete(self, code, cursor_pos):
"""
Experimental completions from IPython, using Jedi.
"""
if cursor_pos is None:
cursor_pos = len(code)
with _provisionalcompleter():
raw_completions = self.shell.Completer.completions(code, cursor_pos)
completions = list(_rectify_completions(code, raw_completions))
comps = []
for comp in completions:
comps.append(dict(
start=comp.start,
end=comp.end,
text=comp.text,
type=comp.type,
))
if completions:
s = completions[0].start
e = completions[0].end
matches = [c.text for c in completions]
else:
s = cursor_pos
e = cursor_pos
matches = []
return {'matches': matches,
'cursor_end': e,
'cursor_start': s,
'metadata': {_EXPERIMENTAL_KEY_NAME: comps},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
reply_content = {'status' : 'ok'}
reply_content['data'] = {}
reply_content['metadata'] = {}
try:
reply_content['data'].update(
self.shell.object_inspect_mime(
name,
detail_level=detail_level
)
)
if not self.shell.enable_html_pager:
reply_content['data'].pop('text/html')
reply_content['found'] = True
except KeyError:
reply_content['found'] = False
return reply_content
def do_history(self, hist_access_type, output, raw, session=0, start=0,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {
'status': 'ok',
'history' : list(hist),
}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
transformer_manager = getattr(self.shell, 'input_transformer_manager', None)
if transformer_manager is None:
# input_splitter attribute is deprecated
transformer_manager = self.shell.input_splitter
status, indent_spaces = transformer_manager.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
from .serialize import serialize_object, unpack_apply_message
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
            fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except BaseException as e:
# invoke IPython traceback formatting
shell.showtraceback()
reply_content = {
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(e).__name__),
u'evalue': safe_unicode(e),
}
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
reply_content['status'] = 'error'
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
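# Client-side sketch (illustrative): after a successful execute, do_execute
# above evaluates user_expressions and returns them in the execute_reply.
# Assumes a connected blocking client `kc`, as in the in-process files
# earlier in this commit.
kc.execute('x = 2', user_expressions={'double': 'x * 2'})
reply = kc.get_shell_msg()
expr = reply['content']['user_expressions']['double']
# expr['status'] == 'ok' and expr['data']['text/plain'] == '4'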

View file

@@ -0,0 +1,197 @@
"""Utilities to manipulate JSON objects."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from binascii import b2a_base64
import math
import re
import types
from datetime import datetime
import numbers
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type, iteritems
from ipython_genutils.encoding import DEFAULT_ENCODING
next_attr_name = '__next__' if py3compat.PY3 else 'next'
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT = re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$")
# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# constants for identifying png/jpeg data
PNG = b'\x89PNG\r\n\x1a\n'
# front of PNG base64-encoded
PNG64 = b'iVBORw0KG'
JPEG = b'\xff\xd8'
# front of JPEG base64-encoded
JPEG64 = b'/9'
# constants for identifying gif data
GIF_64 = b'R0lGODdh'
GIF89_64 = b'R0lGODlh'
# front of PDF base64-encoded
PDF64 = b'JVBER'
def encode_images(format_dict):
"""b64-encodes images in a displaypub format dict
Perhaps this should be handled in json_clean itself?
Parameters
----------
format_dict : dict
A dictionary of display data keyed by mime-type
Returns
-------
format_dict : dict
A copy of the same dictionary,
but binary image data ('image/png', 'image/jpeg' or 'application/pdf')
is base64-encoded.
"""
    # no need for handling of ambiguous bytestrings on Python 3,
    # where bytes objects always represent binary data and are thus
    # base64-encoded.
if py3compat.PY3:
return format_dict
encoded = format_dict.copy()
pngdata = format_dict.get('image/png')
if isinstance(pngdata, bytes):
# make sure we don't double-encode
if not pngdata.startswith(PNG64):
pngdata = b2a_base64(pngdata)
encoded['image/png'] = pngdata.decode('ascii')
jpegdata = format_dict.get('image/jpeg')
if isinstance(jpegdata, bytes):
# make sure we don't double-encode
if not jpegdata.startswith(JPEG64):
jpegdata = b2a_base64(jpegdata)
encoded['image/jpeg'] = jpegdata.decode('ascii')
gifdata = format_dict.get('image/gif')
if isinstance(gifdata, bytes):
# make sure we don't double-encode
if not gifdata.startswith((GIF_64, GIF89_64)):
gifdata = b2a_base64(gifdata)
encoded['image/gif'] = gifdata.decode('ascii')
pdfdata = format_dict.get('application/pdf')
if isinstance(pdfdata, bytes):
# make sure we don't double-encode
if not pdfdata.startswith(PDF64):
pdfdata = b2a_base64(pdfdata)
encoded['application/pdf'] = pdfdata.decode('ascii')
return encoded
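# Behavior sketch (illustrative): raw PNG magic bytes are b64-encoded on
# Python 2, while already-encoded data is left alone; on Python 3 the dict
# is returned unchanged.
fmt = encode_images({'image/png': PNG})
# Python 2: fmt['image/png'].startswith('iVBORw0KG') and is ascii text
# Python 3: fmt is the input dict, untouched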
def json_clean(obj):
"""Clean an object to ensure it's safe to encode in JSON.
Atomic, immutable objects are returned unmodified. Sets and tuples are
converted to lists, lists are copied and dicts are also copied.
Note: dicts whose keys could cause collisions upon encoding (such as a dict
with both the number 1 and the string '1' as keys) will cause a ValueError
to be raised.
Parameters
----------
obj : any python object
Returns
-------
out : object
A version of the input which will not cause an encoding error when
encoded as JSON. Note that this function does not *encode* its inputs,
it simply sanitizes it so that there will be no encoding errors later.
"""
# types that are 'atomic' and ok in json as-is.
atomic_ok = (unicode_type, type(None))
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
# Since bools are a subtype of Integrals, which are a subtype of Reals,
# we have to check them in that order.
if isinstance(obj, bool):
return obj
if isinstance(obj, numbers.Integral):
        # cast to a plain int, in case subclasses override __str__ (e.g. boost enum, #4598)
return int(obj)
if isinstance(obj, numbers.Real):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return float(obj)
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
if py3compat.PY3:
            # unambiguous binary data is base64-encoded
# (this probably should have happened upstream)
return b2a_base64(obj).decode('ascii')
else:
# Python 2 bytestr is ambiguous,
# needs special handling for possible binary bytestrings.
# imperfect workaround: if ascii, assume text.
# otherwise assume binary, base64-encode (py3 behavior).
try:
return obj.decode('ascii')
except UnicodeDecodeError:
return b2a_base64(obj).decode('ascii')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(unicode_type, obj)))
if nkeys != nkeys_collapsed:
raise ValueError('dict cannot be safely converted to JSON: '
'key collision would lead to dropped values')
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k,v in iteritems(obj):
out[unicode_type(k)] = json_clean(v)
return out
if isinstance(obj, datetime):
return obj.strftime(ISO8601)
# we don't understand it, it's probably an unserializable object
raise ValueError("Can't clean for JSON: %r" % obj)

View file

@@ -0,0 +1,628 @@
"""An Application for launching a kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
import os
import sys
import errno
import signal
import traceback
import logging
from tornado import ioloop
import zmq
from zmq.eventloop import ioloop as zmq_ioloop
from zmq.eventloop.zmqstream import ZMQStream
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from ipython_genutils.path import filefind, ensure_dir_exists
from traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default
)
from ipython_genutils.importstring import import_item
from jupyter_core.paths import jupyter_runtime_dir
from jupyter_client import write_connection_file
from jupyter_client.connect import ConnectionFileMixin
# local imports
from .iostream import IOPubThread
from .heartbeat import Heartbeat
from .ipkernel import IPythonKernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from jupyter_client.session import (
Session, session_flags, session_aliases,
)
from .zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'transport': 'IPKernelApp.transport',
})
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
'trio-loop' : (
{'InteractiveShell' : {'trio_loop' : False}},
'Enable Trio as main event loop.'
),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp,
ConnectionFileMixin):
name='ipython-kernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = Type('ipykernel.ipkernel.IPythonKernel',
klass='ipykernel.kernelbase.Kernel',
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""").tag(config=True)
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat, allow_none=True)
context = Any()
shell_socket = Any()
control_socket = Any()
stdin_socket = Any()
iopub_socket = Any()
iopub_thread = Any()
ports = Dict()
subcommands = {
'install': (
'ipykernel.kernelspec.InstallIPythonKernelSpecApp',
'Install the IPython kernel'
),
}
# connection info:
connection_dir = Unicode()
@default('connection_dir')
def _default_connection_dir(self):
return jupyter_runtime_dir()
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.connection_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
    trio_loop = Bool(False, help="Run the kernel with Trio as the main event loop.").tag(config=True)
quiet = Bool(True, help="Only send stdout/stderr to output stream").tag(config=True)
outstream_class = DottedObjectName('ipykernel.iostream.OutStream',
help="The importstring for the OutStream factory").tag(config=True)
displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook',
help="The importstring for the DisplayHook factory").tag(config=True)
# polling
parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0),
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""").tag(config=True)
interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0),
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""").tag(config=True)
def init_crash_handler(self):
sys.excepthook = self.excepthook
def excepthook(self, etype, evalue, tb):
# write uncaught traceback to 'real' stderr, not zmq-forwarder
traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle and self.parent_handle != 1:
# PID 1 (init) is special and will never go away,
# only be reassigned.
# Parent polling doesn't work if ppid == 1 to start with.
self.poller = ParentPollerUnix()
def _try_bind_socket(self, s, port):
iface = '%s://%s' % (self.transport, self.ip)
if self.transport == 'tcp':
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind("tcp://%s:%i" % (self.ip, port))
elif self.transport == 'ipc':
if port <= 0:
port = 1
path = "%s-%i" % (self.ip, port)
while os.path.exists(path):
port = port + 1
path = "%s-%i" % (self.ip, port)
else:
path = "%s-%i" % (self.ip, port)
s.bind("ipc://%s" % path)
return port
def _bind_socket(self, s, port):
try:
win_in_use = errno.WSAEADDRINUSE
except AttributeError:
win_in_use = None
# Try up to 100 times to bind a port when in conflict to avoid
# infinite attempts in bad setups
max_attempts = 1 if port else 100
for attempt in range(max_attempts):
try:
return self._try_bind_socket(s, port)
except zmq.ZMQError as ze:
# Raise if we have any error not related to socket binding
if ze.errno != errno.EADDRINUSE and ze.errno != win_in_use:
raise
if attempt == max_attempts - 1:
raise
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.connection_file = filefind(self.connection_file, ['.', self.connection_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, and I'll create it in this directory:
ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700)
# Also, I will clean it up:
atexit.register(self.cleanup_connection_file)
return
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
assert self.context is None, "init_sockets cannot be called twice!"
self.context = context = zmq.Context()
atexit.register(self.close)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_socket.linger = 1000
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_socket.linger = 1000
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_socket.linger = 1000
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
if hasattr(zmq, 'ROUTER_HANDOVER'):
# set router-handover to workaround zeromq reconnect problems
# in certain rare circumstances
# see ipython/ipykernel#270 and zeromq/libzmq#2892
self.shell_socket.router_handover = \
self.control_socket.router_handover = \
self.stdin_socket.router_handover = 1
self.init_iopub(context)
def init_iopub(self, context):
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_socket.linger = 1000
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.configure_tornado_logger()
self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
self.iopub_thread.start()
# backward-compat: wrap iopub socket API in background thread
self.iopub_socket = self.iopub_thread.background_socket
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def close(self):
"""Close zmq sockets in an orderly fashion"""
# un-capture IO before we start closing channels
self.reset_io()
self.log.info("Cleaning up sockets")
if self.heartbeat:
self.log.debug("Closing heartbeat channel")
self.heartbeat.context.term()
if self.iopub_thread:
self.log.debug("Closing iopub channel")
self.iopub_thread.stop()
self.iopub_thread.close()
for channel in ('shell', 'control', 'stdin'):
self.log.debug("Closing %s channel", channel)
socket = getattr(self, channel + "_socket", None)
if socket and not socket.closed:
socket.close()
self.log.debug("Terminating zmq context")
self.context.term()
self.log.debug("Terminated zmq context")
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.connection_dir:
# use shortname
tail = basename
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
# unless log-level is CRITICAL (--quiet)
if not self.parent_handle and self.log_level < logging.CRITICAL:
print(_ctrl_c_message, file=sys.__stdout__)
for line in lines:
print(line, file=sys.__stdout__)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
if sys.stdout is not None:
sys.stdout.flush()
e_stdout = None if self.quiet else sys.__stdout__
e_stderr = None if self.quiet else sys.__stderr__
sys.stdout = outstream_factory(self.session, self.iopub_thread,
u'stdout',
echo=e_stdout)
if sys.stderr is not None:
sys.stderr.flush()
sys.stderr = outstream_factory(self.session, self.iopub_thread,
u'stderr',
echo=e_stderr)
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
self.displayhook = displayhook_factory(self.session, self.iopub_socket)
sys.displayhook = self.displayhook
self.patch_io()
def reset_io(self):
"""restore original io
restores state after init_io
"""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.displayhook = sys.__displayhook__
def patch_io(self):
"""Patch important libraries that can't handle sys.stdout forwarding"""
try:
import faulthandler
except ImportError:
pass
else:
# Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
# updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
# https://docs.python.org/3/library/faulthandler.html#faulthandler.enable
# change default file to __stderr__ from forwarded stderr
faulthandler_enable = faulthandler.enable
def enable(file=sys.__stderr__, all_threads=True, **kwargs):
return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)
faulthandler.enable = enable
if hasattr(faulthandler, 'register'):
faulthandler_register = faulthandler.register
def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
return faulthandler_register(signum, file=file, all_threads=all_threads,
chain=chain, **kwargs)
faulthandler.register = register
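    # Hedged sketch of the patched behavior: after patch_io, a bare
    # faulthandler.enable() writes to the real stderr instead of the zmq
    # forwarder; `log_f` below is a hypothetical file object.
    #
    #     import faulthandler
    #     faulthandler.enable()             # now defaults to sys.__stderr__
    #     faulthandler.enable(file=log_f)   # explicit files still work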
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = self.kernel_class.instance
kernel = kernel_factory(parent=self, session=self.session,
control_stream=control_stream,
shell_streams=[shell_stream, control_stream],
iopub_thread=self.iopub_thread,
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports({
name + '_port': port for name, port in self.ports.items()
})
self.kernel = kernel
# Allow the displayhook to get the execution count
self.displayhook.get_execution_count = lambda: kernel.execution_count
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Register inline backend as default
# this is higher priority than matplotlibrc,
# but lower priority than anything else (mpl.use() for instance).
# This only affects matplotlib >= 1.5
if not os.environ.get('MPLBACKEND'):
os.environ['MPLBACKEND'] = 'module://ipykernel.pylab.backend_inline'
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace error-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=sys.stderr)
print (shell.InteractiveTB.stb2text(stb), file=sys.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = getattr(self.kernel, 'shell', None)
if self.shell:
self.shell.configurables.append(self)
def configure_tornado_logger(self):
""" Configure the tornado logging.Logger.
Must set up the tornado logger or else tornado will call
basicConfig for the root logger which makes the root logger
go to the real sys.stderr instead of the capture streams.
This function mimics the setup of logging.basicConfig.
"""
logger = logging.getLogger('tornado')
handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
        do this as early as possible to make it a low priority and overridable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
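    # Standalone equivalent (a sketch, assuming Windows and Python >= 3.8):
    #
    #     import asyncio, sys
    #     if sys.platform.startswith("win"):
    #         asyncio.set_event_loop_policy(
    #             asyncio.WindowsSelectorEventLoopPolicy())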
def init_pdb(self):
"""Replace pdb with IPython's version that is interruptible.
With the non-interruptible version, stopping pdb() locks up the kernel in a
non-recoverable state.
"""
import pdb
from IPython.core import debugger
if hasattr(debugger, "InterruptiblePdb"):
# Only available in newer IPython releases:
debugger.Pdb = debugger.InterruptiblePdb
pdb.Pdb = debugger.Pdb
pdb.set_trace = debugger.set_trace
@catch_config_error
def initialize(self, argv=None):
self._init_asyncio_patch()
super(IPKernelApp, self).initialize(argv)
if self.subapp is not None:
return
self.init_pdb()
self.init_blackhole()
self.init_connection_file()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.write_connection_file()
# Log connection info after writing connection file, so that the connection
# file is definitely available at the time someone reads the log.
self.log_connection_info()
self.init_io()
try:
self.init_signal()
except:
            # Catch exception when initializing signal fails, e.g. when running
            # the kernel on a separate thread
if self.log_level < logging.CRITICAL:
self.log.error("Unable to initialize signal:", exc_info=True)
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
if self.shell:
self.init_gui_pylab()
self.init_extensions()
self.init_code()
        # flush stdout/stderr, so that anything written to these streams during
        # initialization does not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.subapp is not None:
return self.subapp.start()
if self.poller is not None:
self.poller.start()
self.kernel.start()
self.io_loop = ioloop.IOLoop.current()
if self.trio_loop:
from ipykernel.trio_runner import TrioRunner
tr = TrioRunner()
tr.initialize(self.kernel, self.io_loop)
try:
tr.run()
except KeyboardInterrupt:
pass
else:
try:
self.io_loop.start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,924 @@
"""Base class for a kernel that talks to frontends over 0MQ."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from datetime import datetime
from functools import partial
import itertools
import logging
from signal import signal, default_int_handler, SIGINT
import sys
import time
import uuid
try:
# jupyter_client >= 5, use tz-aware now
from jupyter_client.session import utcnow as now
except ImportError:
# jupyter_client < 5, use local now()
now = datetime.now
from tornado import ioloop
from tornado import gen
from tornado.queues import PriorityQueue, QueueEmpty
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from traitlets.config.configurable import SingletonConfigurable
from IPython.core.error import StdinNotImplementedError
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type, string_types
from ipykernel.jsonutil import json_clean
from traitlets import (
Any, Instance, Float, Dict, List, Set, Integer, Unicode, Bool,
observe, default
)
from jupyter_client.session import Session
from ._version import kernel_protocol_version
CONTROL_PRIORITY = 1
SHELL_PRIORITY = 10
ABORT_PRIORITY = 20
class Kernel(SingletonConfigurable):
#---------------------------------------------------------------------------
# Kernel interface
#---------------------------------------------------------------------------
# attribute to override with a GUI
eventloop = Any(None)
@observe('eventloop')
def _update_eventloop(self, change):
"""schedule call to eventloop from IOLoop"""
loop = ioloop.IOLoop.current()
if change.new is not None:
loop.add_callback(self.enter_eventloop)
session = Instance(Session, allow_none=True)
profile_dir = Instance('IPython.core.profiledir.ProfileDir', allow_none=True)
shell_streams = List()
control_stream = Instance(ZMQStream, allow_none=True)
iopub_socket = Any()
iopub_thread = Any()
stdin_socket = Any()
log = Instance(logging.Logger, allow_none=True)
# identities:
int_id = Integer(-1)
ident = Unicode()
@default('ident')
def _default_ident(self):
return unicode_type(uuid.uuid4())
# This should be overridden by wrapper kernels that implement any real
# language.
language_info = {}
# any links that should go in the help menu
help_links = List()
# Private interface
_darwin_app_nap = Bool(True,
help="""Whether to use appnope for compatibility with OS X App Nap.
Only affects OS X >= 10.9.
"""
).tag(config=True)
# track associations with current request
_allow_stdin = Bool(False)
_parent_header = Dict()
_parent_ident = Any(b'')
# Time to sleep after flushing the stdout/err buffers in each execute
# cycle. While this introduces a hard limit on the minimal latency of the
# execute cycle, it helps prevent output synchronization problems for
# clients.
# Units are in seconds. The minimum zmq latency on local host is probably
# ~150 microseconds, set this to 500us for now. We may need to increase it
# a little if it's not enough after more interactive testing.
_execute_sleep = Float(0.0005).tag(config=True)
# Frequency of the kernel's event loop.
# Units are in seconds, kernel subclasses for GUI toolkits may need to
# adapt to milliseconds.
_poll_interval = Float(0.01).tag(config=True)
stop_on_error_timeout = Float(
0.1,
config=True,
help="""time (in seconds) to wait for messages to arrive
when aborting queued requests after an error.
Requests that arrive within this window after an error
will be cancelled.
Increase in the event of unusually slow network
causing significant delays,
which can manifest as e.g. "Run all" in a notebook
aborting some, but not all, messages after an error.
"""
)
# If the shutdown was requested over the network, we leave here the
# necessary reply message so it can be sent by our registered atexit
# handler. This ensures that the reply is only sent to clients truly at
# the end of our shutdown process (which happens after the underlying
# IPython shell's own shutdown).
_shutdown_message = None
    # This is a dict of port numbers that the kernel is listening on. It is set
    # by record_ports and used by connect_request.
_recorded_ports = Dict()
# set of aborted msg_ids
aborted = Set()
# Track execution count here. For IPython, we override this to use the
# execution count we store in the shell.
execution_count = 0
msg_types = [
'execute_request', 'complete_request',
'inspect_request', 'history_request',
'comm_info_request', 'kernel_info_request',
'connect_request', 'shutdown_request',
'is_complete_request',
# deprecated:
'apply_request',
]
# add deprecated ipyparallel control messages
control_msg_types = msg_types + ['clear_request', 'abort_request']
def __init__(self, **kwargs):
super(Kernel, self).__init__(**kwargs)
# Build dict of handlers for message types
self.shell_handlers = {}
for msg_type in self.msg_types:
self.shell_handlers[msg_type] = getattr(self, msg_type)
self.control_handlers = {}
for msg_type in self.control_msg_types:
self.control_handlers[msg_type] = getattr(self, msg_type)
@gen.coroutine
def dispatch_control(self, msg):
"""dispatch control requests"""
idents, msg = self.session.feed_identities(msg, copy=False)
try:
msg = self.session.deserialize(msg, content=True, copy=False)
except:
self.log.error("Invalid Control Message", exc_info=True)
return
self.log.debug("Control received: %s", msg)
# Set the parent message for side effects.
self.set_parent(idents, msg)
self._publish_status(u'busy')
if self._aborting:
self._send_abort_reply(self.control_stream, msg, idents)
self._publish_status(u'idle')
return
header = msg['header']
msg_type = header['msg_type']
handler = self.control_handlers.get(msg_type, None)
if handler is None:
self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
else:
try:
yield gen.maybe_future(handler(self.control_stream, idents, msg))
except Exception:
self.log.error("Exception in control handler:", exc_info=True)
sys.stdout.flush()
sys.stderr.flush()
self._publish_status(u'idle')
# flush to ensure reply is sent
self.control_stream.flush(zmq.POLLOUT)
def should_handle(self, stream, msg, idents):
"""Check whether a shell-channel message should be handled
Allows subclasses to prevent handling of certain messages (e.g. aborted requests).
"""
msg_id = msg['header']['msg_id']
if msg_id in self.aborted:
msg_type = msg['header']['msg_type']
# is it safe to assume a msg_id will not be resubmitted?
self.aborted.remove(msg_id)
self._send_abort_reply(stream, msg, idents)
return False
return True
@gen.coroutine
def dispatch_shell(self, stream, msg):
"""dispatch shell requests"""
idents, msg = self.session.feed_identities(msg, copy=False)
try:
msg = self.session.deserialize(msg, content=True, copy=False)
except:
self.log.error("Invalid Message", exc_info=True)
return
# Set the parent message for side effects.
self.set_parent(idents, msg)
self._publish_status(u'busy')
if self._aborting:
self._send_abort_reply(stream, msg, idents)
self._publish_status(u'idle')
# flush to ensure reply is sent before
# handling the next request
stream.flush(zmq.POLLOUT)
return
msg_type = msg['header']['msg_type']
# Print some info about this message and leave a '--->' marker, so it's
# easier to trace visually the message chain when debugging. Each
# handler prints its message at the end.
self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
self.log.debug(' Content: %s\n --->\n ', msg['content'])
if not self.should_handle(stream, msg, idents):
return
handler = self.shell_handlers.get(msg_type, None)
if handler is None:
self.log.warning("Unknown message type: %r", msg_type)
else:
self.log.debug("%s: %s", msg_type, msg)
try:
self.pre_handler_hook()
except Exception:
self.log.debug("Unable to signal in pre_handler_hook:", exc_info=True)
try:
yield gen.maybe_future(handler(stream, idents, msg))
except Exception:
self.log.error("Exception in message handler:", exc_info=True)
finally:
try:
self.post_handler_hook()
except Exception:
self.log.debug("Unable to signal in post_handler_hook:", exc_info=True)
sys.stdout.flush()
sys.stderr.flush()
self._publish_status(u'idle')
# flush to ensure reply is sent before
# handling the next request
stream.flush(zmq.POLLOUT)
def pre_handler_hook(self):
"""Hook to execute before calling message handler"""
# ensure default_int_handler during handler call
self.saved_sigint_handler = signal(SIGINT, default_int_handler)
def post_handler_hook(self):
"""Hook to execute after calling message handler"""
signal(SIGINT, self.saved_sigint_handler)
def enter_eventloop(self):
"""enter eventloop"""
self.log.info("Entering eventloop %s", self.eventloop)
# record handle, so we can check when this changes
eventloop = self.eventloop
if eventloop is None:
self.log.info("Exiting as there is no eventloop")
return
def advance_eventloop():
# check if eventloop changed:
if self.eventloop is not eventloop:
self.log.info("exiting eventloop %s", eventloop)
return
if self.msg_queue.qsize():
self.log.debug("Delaying eventloop due to waiting messages")
# still messages to process, make the eventloop wait
schedule_next()
return
self.log.debug("Advancing eventloop %s", eventloop)
try:
eventloop(self)
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
self.log.error("KeyboardInterrupt caught in kernel")
if self.eventloop is eventloop:
# schedule advance again
schedule_next()
def schedule_next():
"""Schedule the next advance of the eventloop"""
# flush the eventloop every so often,
# giving us a chance to handle messages in the meantime
self.log.debug("Scheduling eventloop advance")
self.io_loop.call_later(1, advance_eventloop)
# begin polling the eventloop
schedule_next()
@gen.coroutine
def do_one_iteration(self):
"""Process a single shell message
Any pending control messages will be flushed as well
.. versionchanged:: 5
This is now a coroutine
"""
# flush messages off of shell streams into the message queue
for stream in self.shell_streams:
stream.flush()
# process all messages higher priority than shell (control),
# and at most one shell message per iteration
priority = 0
while priority is not None and priority < SHELL_PRIORITY:
priority = yield self.process_one(wait=False)
@gen.coroutine
def process_one(self, wait=True):
"""Process one request
Returns priority of the message handled.
Returns None if no message was handled.
"""
if wait:
priority, t, dispatch, args = yield self.msg_queue.get()
else:
try:
priority, t, dispatch, args = self.msg_queue.get_nowait()
except QueueEmpty:
return None
        yield gen.maybe_future(dispatch(*args))
        return priority
@gen.coroutine
def dispatch_queue(self):
"""Coroutine to preserve order of message handling
Ensures that only one message is processing at a time,
even when the handler is async
"""
while True:
# ensure control stream is flushed before processing shell messages
if self.control_stream:
self.control_stream.flush()
# receive the next message and handle it
try:
yield self.process_one()
except Exception:
self.log.exception("Error in message handler")
_message_counter = Any(
help="""Monotonic counter of messages
Ensures messages of the same priority are handled in arrival order.
""",
)
@default('_message_counter')
def _message_counter_default(self):
return itertools.count()
def schedule_dispatch(self, priority, dispatch, *args):
"""schedule a message for dispatch"""
idx = next(self._message_counter)
self.msg_queue.put_nowait(
(
priority,
idx,
dispatch,
args,
)
)
# ensure the eventloop wakes up
self.io_loop.add_callback(lambda: None)
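    # Why the queue ordering works (illustrative values only): PriorityQueue
    # compares its entries as tuples, element by element, so the monotonic
    # counter breaks ties between equal priorities and preserves arrival order.
    #
    #     (CONTROL_PRIORITY, 0, ...) < (SHELL_PRIORITY, 1, ...)   # control first
    #     (SHELL_PRIORITY, 1, ...)  < (SHELL_PRIORITY, 2, ...)    # FIFO within shell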
def start(self):
"""register dispatchers for streams"""
self.io_loop = ioloop.IOLoop.current()
self.msg_queue = PriorityQueue()
self.io_loop.add_callback(self.dispatch_queue)
if self.control_stream:
self.control_stream.on_recv(
partial(
self.schedule_dispatch,
CONTROL_PRIORITY,
self.dispatch_control,
),
copy=False,
)
for s in self.shell_streams:
if s is self.control_stream:
continue
s.on_recv(
partial(
self.schedule_dispatch,
SHELL_PRIORITY,
self.dispatch_shell,
s,
),
copy=False,
)
        # publish starting status
self._publish_status('starting')
def record_ports(self, ports):
"""Record the ports that this kernel is using.
        The creator of the Kernel instance must call this method if they
want the :meth:`connect_request` method to return the port numbers.
"""
self._recorded_ports = ports
#---------------------------------------------------------------------------
# Kernel request handlers
#---------------------------------------------------------------------------
def _publish_execute_input(self, code, parent, execution_count):
"""Publish the code request on the iopub stream."""
self.session.send(self.iopub_socket, u'execute_input',
{u'code':code, u'execution_count': execution_count},
parent=parent, ident=self._topic('execute_input')
)
def _publish_status(self, status, parent=None):
"""send status (busy/idle) on IOPub"""
self.session.send(self.iopub_socket,
u'status',
{u'execution_state': status},
parent=parent or self._parent_header,
ident=self._topic('status'),
)
def set_parent(self, ident, parent):
"""Set the current parent_header
Side effects (IOPub messages) and replies are associated with
the request that caused them via the parent_header.
The parent identity is used to route input_request messages
on the stdin channel.
"""
self._parent_ident = ident
self._parent_header = parent
def send_response(self, stream, msg_or_type, content=None, ident=None,
buffers=None, track=False, header=None, metadata=None):
"""Send a response to the message we're currently processing.
This accepts all the parameters of :meth:`jupyter_client.session.Session.send`
except ``parent``.
This relies on :meth:`set_parent` having been called for the current
message.
"""
return self.session.send(stream, msg_or_type, content, self._parent_header,
ident, buffers, track, header, metadata)
def init_metadata(self, parent):
"""Initialize metadata.
Run at the beginning of execution requests.
"""
# FIXME: `started` is part of ipyparallel
# Remove for ipykernel 5.0
return {
'started': now(),
}
def finish_metadata(self, parent, metadata, reply_content):
"""Finish populating metadata.
Run after completing an execution request.
"""
return metadata
@gen.coroutine
def execute_request(self, stream, ident, parent):
"""handle an execute_request"""
try:
content = parent[u'content']
code = py3compat.cast_unicode_py2(content[u'code'])
silent = content[u'silent']
store_history = content.get(u'store_history', not silent)
user_expressions = content.get('user_expressions', {})
allow_stdin = content.get('allow_stdin', False)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", parent)
return
stop_on_error = content.get('stop_on_error', True)
metadata = self.init_metadata(parent)
# Re-broadcast our input for the benefit of listening clients, and
# start computing output
if not silent:
self.execution_count += 1
self._publish_execute_input(code, parent, self.execution_count)
reply_content = yield gen.maybe_future(
self.do_execute(
code, silent, store_history,
user_expressions, allow_stdin,
)
)
# Flush output before sending the reply.
sys.stdout.flush()
sys.stderr.flush()
# FIXME: on rare occasions, the flush doesn't seem to make it to the
# clients... This seems to mitigate the problem, but we definitely need
# to better understand what's going on.
if self._execute_sleep:
time.sleep(self._execute_sleep)
# Send the reply.
reply_content = json_clean(reply_content)
metadata = self.finish_metadata(parent, metadata, reply_content)
reply_msg = self.session.send(stream, u'execute_reply',
reply_content, parent, metadata=metadata,
ident=ident)
self.log.debug("%s", reply_msg)
if not silent and reply_msg['content']['status'] == u'error' and stop_on_error:
yield self._abort_queues()
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
"""Execute user code. Must be overridden by subclasses.
"""
raise NotImplementedError
@gen.coroutine
def complete_request(self, stream, ident, parent):
content = parent['content']
code = content['code']
cursor_pos = content['cursor_pos']
matches = yield gen.maybe_future(self.do_complete(code, cursor_pos))
matches = json_clean(matches)
completion_msg = self.session.send(stream, 'complete_reply',
matches, parent, ident)
def do_complete(self, code, cursor_pos):
"""Override in subclasses to find completions.
"""
return {'matches' : [],
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos,
'metadata' : {},
'status' : 'ok'}
@gen.coroutine
def inspect_request(self, stream, ident, parent):
content = parent['content']
reply_content = yield gen.maybe_future(
self.do_inspect(
content['code'], content['cursor_pos'],
content.get('detail_level', 0),
)
)
# Before we send this object over, we scrub it for JSON usage
reply_content = json_clean(reply_content)
msg = self.session.send(stream, 'inspect_reply',
reply_content, parent, ident)
self.log.debug("%s", msg)
def do_inspect(self, code, cursor_pos, detail_level=0):
"""Override in subclasses to allow introspection.
"""
return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False}
@gen.coroutine
def history_request(self, stream, ident, parent):
content = parent['content']
reply_content = yield gen.maybe_future(self.do_history(**content))
reply_content = json_clean(reply_content)
msg = self.session.send(stream, 'history_reply',
reply_content, parent, ident)
self.log.debug("%s", msg)
def do_history(self, hist_access_type, output, raw, session=None, start=None,
stop=None, n=None, pattern=None, unique=False):
"""Override in subclasses to access history.
"""
return {'status': 'ok', 'history': []}
def connect_request(self, stream, ident, parent):
if self._recorded_ports is not None:
content = self._recorded_ports.copy()
else:
content = {}
content['status'] = 'ok'
msg = self.session.send(stream, 'connect_reply',
content, parent, ident)
self.log.debug("%s", msg)
@property
def kernel_info(self):
return {
'protocol_version': kernel_protocol_version,
'implementation': self.implementation,
'implementation_version': self.implementation_version,
'language_info': self.language_info,
'banner': self.banner,
'help_links': self.help_links,
}
def kernel_info_request(self, stream, ident, parent):
content = {'status': 'ok'}
content.update(self.kernel_info)
msg = self.session.send(stream, 'kernel_info_reply',
content, parent, ident)
self.log.debug("%s", msg)
def comm_info_request(self, stream, ident, parent):
content = parent['content']
target_name = content.get('target_name', None)
# Should this be moved to ipkernel?
if hasattr(self, 'comm_manager'):
comms = {
k: dict(target_name=v.target_name)
for (k, v) in self.comm_manager.comms.items()
if v.target_name == target_name or target_name is None
}
else:
comms = {}
reply_content = dict(comms=comms, status='ok')
msg = self.session.send(stream, 'comm_info_reply',
reply_content, parent, ident)
self.log.debug("%s", msg)
@gen.coroutine
def shutdown_request(self, stream, ident, parent):
content = yield gen.maybe_future(self.do_shutdown(parent['content']['restart']))
self.session.send(stream, u'shutdown_reply', content, parent, ident=ident)
# same content, but different msg_id for broadcasting on IOPub
self._shutdown_message = self.session.msg(u'shutdown_reply',
content, parent
)
self._at_shutdown()
# call sys.exit after a short delay
loop = ioloop.IOLoop.current()
loop.add_timeout(time.time()+0.1, loop.stop)
def do_shutdown(self, restart):
"""Override in subclasses to do things when the frontend shuts down the
kernel.
"""
return {'status': 'ok', 'restart': restart}
@gen.coroutine
def is_complete_request(self, stream, ident, parent):
content = parent['content']
code = content['code']
reply_content = yield gen.maybe_future(self.do_is_complete(code))
reply_content = json_clean(reply_content)
reply_msg = self.session.send(stream, 'is_complete_reply',
reply_content, parent, ident)
self.log.debug("%s", reply_msg)
def do_is_complete(self, code):
"""Override in subclasses to find completions.
"""
        return {'status': 'unknown'}
#---------------------------------------------------------------------------
# Engine methods (DEPRECATED)
#---------------------------------------------------------------------------
def apply_request(self, stream, ident, parent):
self.log.warning("apply_request is deprecated in kernel_base, moving to ipyparallel.")
try:
content = parent[u'content']
bufs = parent[u'buffers']
msg_id = parent['header']['msg_id']
except:
self.log.error("Got bad msg: %s", parent, exc_info=True)
return
md = self.init_metadata(parent)
reply_content, result_buf = self.do_apply(content, bufs, msg_id, md)
# flush i/o
sys.stdout.flush()
sys.stderr.flush()
md = self.finish_metadata(parent, md, reply_content)
self.session.send(stream, u'apply_reply', reply_content,
                          parent=parent, ident=ident, buffers=result_buf, metadata=md)
def do_apply(self, content, bufs, msg_id, reply_metadata):
"""DEPRECATED"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Control messages (DEPRECATED)
#---------------------------------------------------------------------------
def abort_request(self, stream, ident, parent):
"""abort a specific msg by id"""
self.log.warning("abort_request is deprecated in kernel_base. It is only part of IPython parallel")
msg_ids = parent['content'].get('msg_ids', None)
if isinstance(msg_ids, string_types):
msg_ids = [msg_ids]
if not msg_ids:
self._abort_queues()
for mid in msg_ids:
self.aborted.add(str(mid))
content = dict(status='ok')
reply_msg = self.session.send(stream, 'abort_reply', content=content,
parent=parent, ident=ident)
self.log.debug("%s", reply_msg)
def clear_request(self, stream, idents, parent):
"""Clear our namespace."""
self.log.warning("clear_request is deprecated in kernel_base. It is only part of IPython parallel")
content = self.do_clear()
        self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
                          content=content)
def do_clear(self):
"""DEPRECATED since 4.0.3"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _topic(self, topic):
"""prefixed topic for IOPub messages"""
base = "kernel.%s" % self.ident
return py3compat.cast_bytes("%s.%s" % (base, topic))
_aborting = Bool(False)
@gen.coroutine
def _abort_queues(self):
for stream in self.shell_streams:
stream.flush()
self._aborting = True
self.schedule_dispatch(
ABORT_PRIORITY,
self._dispatch_abort,
)
@gen.coroutine
def _dispatch_abort(self):
self.log.info("Finishing abort")
yield gen.sleep(self.stop_on_error_timeout)
self._aborting = False
def _send_abort_reply(self, stream, msg, idents):
"""Send a reply to an aborted request"""
self.log.info("Aborting:")
self.log.info("%s", msg)
reply_type = msg['header']['msg_type'].rsplit('_', 1)[0] + '_reply'
status = {'status': 'aborted'}
md = {'engine': self.ident}
md.update(status)
self.session.send(
stream, reply_type, metadata=md,
content=status, parent=msg, ident=idents,
)
def _no_raw_input(self):
"""Raise StdinNotImplentedError if active frontend doesn't support
stdin."""
raise StdinNotImplementedError("raw_input was called, but this "
"frontend does not support stdin.")
def getpass(self, prompt='', stream=None):
"""Forward getpass to frontends
Raises
------
        StdinNotImplementedError if the active frontend doesn't support stdin.
"""
if not self._allow_stdin:
raise StdinNotImplementedError(
"getpass was called, but this frontend does not support input requests."
)
if stream is not None:
import warnings
warnings.warn("The `stream` parameter of `getpass.getpass` will have no effect when using ipykernel",
UserWarning, stacklevel=2)
return self._input_request(prompt,
self._parent_ident,
self._parent_header,
password=True,
)
def raw_input(self, prompt=''):
"""Forward raw_input to frontends
Raises
------
        StdinNotImplementedError if the active frontend doesn't support stdin.
"""
if not self._allow_stdin:
raise StdinNotImplementedError(
"raw_input was called, but this frontend does not support input requests."
)
return self._input_request(str(prompt),
self._parent_ident,
self._parent_header,
password=False,
)
def _input_request(self, prompt, ident, parent, password=False):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# flush the stdin socket, to purge stale replies
while True:
try:
self.stdin_socket.recv_multipart(zmq.NOBLOCK)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
break
else:
raise
# Send the input request.
content = json_clean(dict(prompt=prompt, password=password))
self.session.send(self.stdin_socket, u'input_request', content, parent,
ident=ident)
# Await a response.
while True:
try:
# Use polling with select() so KeyboardInterrupts can get
# through; doing a blocking recv() means stdin reads are
# uninterruptible on Windows. We need a timeout because
# zmq.select() is also uninterruptible, but at least this
# way reads get noticed immediately and KeyboardInterrupts
# get noticed fairly quickly by human response time standards.
rlist, _, xlist = zmq.select(
[self.stdin_socket], [], [self.stdin_socket], 0.01
)
if rlist or xlist:
ident, reply = self.session.recv(self.stdin_socket)
if (ident, reply) != (None, None):
break
except KeyboardInterrupt:
# re-raise KeyboardInterrupt, to truncate traceback
raise KeyboardInterrupt("Interrupted by user") from None
except Exception as e:
self.log.warning("Invalid Message:", exc_info=True)
try:
value = py3compat.unicode_to_str(reply['content']['value'])
except:
self.log.error("Bad input_reply: %s", parent)
value = ''
if value == '\x04':
# EOF
raise EOFError
return value
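    # Rough shape of the stdin exchange (field names per the Jupyter messaging
    # spec; the values are illustrative):
    #
    #     -> input_request  {'prompt': 'Name? ', 'password': False}
    #     <- input_reply    {'value': 'Ada'}
    #
    # A reply value of '\x04' (EOT) is translated into EOFError above.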
def _at_shutdown(self):
"""Actions taken at shutdown by the kernel, called by python's atexit.
"""
if self._shutdown_message is not None:
self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
self.log.debug("%s", self._shutdown_message)
            for stream in self.shell_streams:
                stream.flush(zmq.POLLOUT)

View file

@ -0,0 +1,188 @@
"""The IPython kernel spec for Jupyter"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import errno
import json
import os
import shutil
import sys
import tempfile
from jupyter_client.kernelspec import KernelSpecManager
pjoin = os.path.join
KERNEL_NAME = 'python%i' % sys.version_info[0]
# path to kernelspec resources
RESOURCES = pjoin(os.path.dirname(__file__), 'resources')
def make_ipkernel_cmd(mod='ipykernel_launcher', executable=None, extra_arguments=None, **kw):
"""Build Popen command list for launching an IPython kernel.
Parameters
----------
    mod : str, optional (default 'ipykernel_launcher')
A string of an IPython module whose __main__ starts an IPython kernel
executable : str, optional (default sys.executable)
The Python executable to use for the kernel process.
extra_arguments : list, optional
A list of extra arguments to pass when executing the launch code.
Returns
-------
A Popen command list
"""
if executable is None:
executable = sys.executable
extra_arguments = extra_arguments or []
arguments = [executable, '-m', mod, '-f', '{connection_file}']
arguments.extend(extra_arguments)
return arguments
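# Illustrative result (the executable path depends on the running interpreter):
#
#     >>> make_ipkernel_cmd()
#     ['/usr/bin/python3', '-m', 'ipykernel_launcher', '-f', '{connection_file}']
#
# The '{connection_file}' placeholder is filled in by jupyter_client at launch.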
def get_kernel_dict(extra_arguments=None):
"""Construct dict for kernel.json"""
return {
'argv': make_ipkernel_cmd(extra_arguments=extra_arguments),
'display_name': 'Python %i' % sys.version_info[0],
'language': 'python',
}
def write_kernel_spec(path=None, overrides=None, extra_arguments=None):
"""Write a kernel spec directory to `path`
If `path` is not specified, a temporary directory is created.
If `overrides` is given, the kernelspec JSON is updated before writing.
The path to the kernelspec is always returned.
"""
if path is None:
path = os.path.join(tempfile.mkdtemp(suffix='_kernels'), KERNEL_NAME)
# stage resources
shutil.copytree(RESOURCES, path)
# write kernel.json
kernel_dict = get_kernel_dict(extra_arguments)
if overrides:
kernel_dict.update(overrides)
with open(pjoin(path, 'kernel.json'), 'w') as f:
json.dump(kernel_dict, f, indent=1)
return path
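# The resulting kernel.json looks roughly like this (Python 3 shown):
#
#     {
#      "argv": ["/usr/bin/python3", "-m", "ipykernel_launcher", "-f", "{connection_file}"],
#      "display_name": "Python 3",
#      "language": "python"
#     }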
def install(kernel_spec_manager=None, user=False, kernel_name=KERNEL_NAME, display_name=None,
prefix=None, profile=None):
"""Install the IPython kernelspec for Jupyter
Parameters
----------
kernel_spec_manager: KernelSpecManager [optional]
A KernelSpecManager to use for installation.
If none provided, a default instance will be created.
user: bool [default: False]
Whether to do a user-only install, or system-wide.
kernel_name: str, optional
Specify a name for the kernelspec.
This is needed for having multiple IPython kernels for different environments.
display_name: str, optional
Specify the display name for the kernelspec
profile: str, optional
Specify a custom profile to be loaded by the kernel.
prefix: str, optional
Specify an install prefix for the kernelspec.
This is needed to install into a non-default location, such as a conda/virtual-env.
Returns
-------
The path where the kernelspec was installed.
"""
if kernel_spec_manager is None:
kernel_spec_manager = KernelSpecManager()
if (kernel_name != KERNEL_NAME) and (display_name is None):
# kernel_name is specified and display_name is not
# default display_name to kernel_name
display_name = kernel_name
overrides = {}
if display_name:
overrides["display_name"] = display_name
if profile:
extra_arguments = ["--profile", profile]
if not display_name:
# add the profile to the default display name
overrides["display_name"] = 'Python %i [profile=%s]' % (sys.version_info[0], profile)
else:
extra_arguments = None
path = write_kernel_spec(overrides=overrides, extra_arguments=extra_arguments)
dest = kernel_spec_manager.install_kernel_spec(
path, kernel_name=kernel_name, user=user, prefix=prefix)
# cleanup afterward
shutil.rmtree(path)
return dest
# Entrypoint
from traitlets.config import Application
class InstallIPythonKernelSpecApp(Application):
"""Dummy app wrapping argparse"""
name = 'ipython-kernel-install'
def initialize(self, argv=None):
if argv is None:
argv = sys.argv[1:]
self.argv = argv
def start(self):
import argparse
parser = argparse.ArgumentParser(prog=self.name,
description="Install the IPython kernel spec.")
parser.add_argument('--user', action='store_true',
help="Install for the current user instead of system-wide")
parser.add_argument('--name', type=str, default=KERNEL_NAME,
help="Specify a name for the kernelspec."
" This is needed to have multiple IPython kernels at the same time.")
parser.add_argument('--display-name', type=str,
help="Specify the display name for the kernelspec."
" This is helpful when you have multiple IPython kernels.")
parser.add_argument('--profile', type=str,
help="Specify an IPython profile to load. "
"This can be used to create custom versions of the kernel.")
parser.add_argument('--prefix', type=str,
help="Specify an install prefix for the kernelspec."
" This is needed to install into a non-default location, such as a conda/virtual-env.")
parser.add_argument('--sys-prefix', action='store_const', const=sys.prefix, dest='prefix',
help="Install to Python's sys.prefix."
" Shorthand for --prefix='%s'. For use in conda/virtual-envs." % sys.prefix)
opts = parser.parse_args(self.argv)
try:
dest = install(user=opts.user, kernel_name=opts.name, profile=opts.profile,
prefix=opts.prefix, display_name=opts.display_name)
except OSError as e:
if e.errno == errno.EACCES:
print(e, file=sys.stderr)
if opts.user:
print("Perhaps you want `sudo` or `--user`?", file=sys.stderr)
self.exit(1)
raise
print("Installed kernelspec %s in %s" % (opts.name, dest))
if __name__ == '__main__':
InstallIPythonKernelSpecApp.launch_instance()

View file

@ -0,0 +1,23 @@
from logging import INFO, DEBUG, WARN, ERROR, FATAL
from zmq.log.handlers import PUBHandler
import warnings
warnings.warn("ipykernel.log is deprecated. It has moved to ipyparallel.engine.log", DeprecationWarning)
class EnginePUBHandler(PUBHandler):
"""A simple PUBHandler subclass that sets root_topic"""
engine=None
def __init__(self, engine, *args, **kwargs):
PUBHandler.__init__(self,*args, **kwargs)
self.engine = engine
@property
def root_topic(self):
"""this is a property, in case the handler is created
before the engine gets registered with an id"""
if isinstance(getattr(self.engine, 'id', None), int):
return "engine.%i"%self.engine.id
else:
return "engine"

View file

@ -0,0 +1,117 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
import ctypes
except:
ctypes = None
import os
import platform
import signal
import time
try:
from _thread import interrupt_main # Py 3
except ImportError:
from thread import interrupt_main # Py 2
from threading import Thread
from traitlets.log import get_logger
import warnings
class ParentPollerUnix(Thread):
""" A Unix-specific daemon thread that terminates the program immediately
when the parent process no longer exists.
"""
def __init__(self):
super(ParentPollerUnix, self).__init__()
self.daemon = True
def run(self):
# We cannot use os.waitpid because it works only for child processes.
from errno import EINTR
while True:
try:
if os.getppid() == 1:
get_logger().warning("Parent appears to have exited, shutting down.")
os._exit(1)
time.sleep(1.0)
except OSError as e:
if e.errno == EINTR:
continue
raise
class ParentPollerWindows(Thread):
""" A Windows-specific daemon thread that listens for a special event that
signals an interrupt and, optionally, terminates the program immediately
when the parent process no longer exists.
"""
def __init__(self, interrupt_handle=None, parent_handle=None):
""" Create the poller. At least one of the optional parameters must be
provided.
Parameters
----------
interrupt_handle : HANDLE (int), optional
If provided, the program will generate a Ctrl+C event when this
handle is signaled.
parent_handle : HANDLE (int), optional
If provided, the program will terminate immediately when this
handle is signaled.
"""
        assert interrupt_handle or parent_handle
super(ParentPollerWindows, self).__init__()
if ctypes is None:
raise ImportError("ParentPollerWindows requires ctypes")
self.daemon = True
self.interrupt_handle = interrupt_handle
self.parent_handle = parent_handle
def run(self):
""" Run the poll loop. This method never returns.
"""
try:
from _winapi import WAIT_OBJECT_0, INFINITE
except ImportError:
from _subprocess import WAIT_OBJECT_0, INFINITE
        # Build the list of handles to listen on.
handles = []
if self.interrupt_handle:
handles.append(self.interrupt_handle)
if self.parent_handle:
handles.append(self.parent_handle)
arch = platform.architecture()[0]
c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int
# Listen forever.
while True:
result = ctypes.windll.kernel32.WaitForMultipleObjects(
len(handles), # nCount
(c_int * len(handles))(*handles), # lpHandles
False, # bWaitAll
INFINITE) # dwMilliseconds
if WAIT_OBJECT_0 <= result < len(handles):
handle = handles[result - WAIT_OBJECT_0]
if handle == self.interrupt_handle:
# check if signal handler is callable
# to avoid 'int not callable' error (Python issue #23395)
if callable(signal.getsignal(signal.SIGINT)):
interrupt_main()
elif handle == self.parent_handle:
get_logger().warning("Parent appears to have exited, shutting down.")
os._exit(1)
elif result < 0:
# wait failed, just give up and stop polling.
warnings.warn("""Parent poll failed. If the frontend dies,
the kernel may be left running. Please let us know
about your system (bitness, Python, etc.) at
ipython-dev@scipy.org""")
return
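# Construction sketch (handle values normally arrive via the JPY_PARENT_PID
# and JPY_INTERRUPT_EVENT environment variables, as in kernelapp):
#
#     poller = ParentPollerWindows(interrupt_handle=interrupt, parent_handle=parent)
#     poller.start()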

View file

@ -0,0 +1,455 @@
# encoding: utf-8
"""Pickle related utilities. Perhaps this should be called 'can'."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import warnings
warnings.warn("ipykernel.pickleutil is deprecated. It has moved to ipyparallel.", DeprecationWarning)
import copy
import sys
from types import FunctionType
try:
import cPickle as pickle
except ImportError:
import pickle
from ipython_genutils import py3compat
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types, iteritems, buffer_to_bytes, buffer_to_bytes_py2
# This registers a hook when it's imported
try:
# available since ipyparallel 5.1.1
from ipyparallel.serialize import codeutil
except ImportError:
# Deprecated since ipykernel 4.3.1
from ipykernel import codeutil
from traitlets.log import get_logger
if py3compat.PY3:
buffer = memoryview
class_type = type
else:
from types import ClassType
class_type = (type, ClassType)
try:
PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL
except AttributeError:
PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL
def _get_cell_type(a=None):
"""the type of a closure cell doesn't seem to be importable,
so just create one
"""
def inner():
return a
return type(py3compat.get_closure(inner)[0])
cell_type = _get_cell_type()
#-------------------------------------------------------------------------------
# Functions
#-------------------------------------------------------------------------------
def interactive(f):
"""decorator for making functions appear as interactively defined.
This results in the function being linked to the user_ns as globals()
instead of the module globals().
"""
# build new FunctionType, so it can have the right globals
# interactive functions never have closures, that's kind of the point
if isinstance(f, FunctionType):
mainmod = __import__('__main__')
f = FunctionType(f.__code__, mainmod.__dict__,
f.__name__, f.__defaults__,
)
# associate with __main__ for uncanning
f.__module__ = '__main__'
return f
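# Hedged usage sketch: `remote_fn` is a hypothetical user function. After
# decoration, it resolves globals from __main__ (the interactive namespace)
# instead of from its defining module.
#
#     @interactive
#     def remote_fn():
#         return some_user_ns_global       # looked up in __main__ at call time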
def use_dill():
"""use dill to expand serialization support
adds support for object methods and closures to serialization.
"""
# import dill causes most of the magic
import dill
# dill doesn't work with cPickle,
# tell the two relevant modules to use plain pickle
global pickle
pickle = dill
try:
from ipykernel import serialize
except ImportError:
pass
else:
serialize.pickle = dill
# disable special function handling, let dill take care of it
can_map.pop(FunctionType, None)
def use_cloudpickle():
"""use cloudpickle to expand serialization support
adds support for object methods and closures to serialization.
"""
import cloudpickle
global pickle
pickle = cloudpickle
try:
from ipykernel import serialize
except ImportError:
pass
else:
serialize.pickle = cloudpickle
# disable special function handling, let cloudpickle take care of it
can_map.pop(FunctionType, None)
#-------------------------------------------------------------------------------
# Classes
#-------------------------------------------------------------------------------
class CannedObject(object):
    def __init__(self, obj, keys=None, hook=None):
        """can an object for safe pickling
        Parameters
        ----------
        obj:
            The object to be canned
        keys: list (optional)
            list of attribute names that will be explicitly canned / uncanned
        hook: callable (optional)
            An optional extra callable,
            which can do additional processing of the uncanned object.
        Large data may be offloaded into the buffers list,
        used for zero-copy transfers.
        """
        keys = keys or []
        self.keys = keys
self.obj = copy.copy(obj)
self.hook = can(hook)
for key in keys:
setattr(self.obj, key, can(getattr(obj, key)))
self.buffers = []
def get_object(self, g=None):
if g is None:
g = {}
obj = self.obj
for key in self.keys:
setattr(obj, key, uncan(getattr(obj, key), g))
if self.hook:
self.hook = uncan(self.hook, g)
self.hook(obj, g)
return self.obj
class Reference(CannedObject):
"""object for wrapping a remote reference by name."""
def __init__(self, name):
if not isinstance(name, string_types):
raise TypeError("illegal name: %r"%name)
self.name = name
self.buffers = []
def __repr__(self):
return "<Reference: %r>"%self.name
def get_object(self, g=None):
if g is None:
g = {}
return eval(self.name, g)
class CannedCell(CannedObject):
"""Can a closure cell"""
def __init__(self, cell):
self.cell_contents = can(cell.cell_contents)
def get_object(self, g=None):
cell_contents = uncan(self.cell_contents, g)
def inner():
return cell_contents
return py3compat.get_closure(inner)[0]
class CannedFunction(CannedObject):
def __init__(self, f):
self._check_type(f)
self.code = f.__code__
if f.__defaults__:
self.defaults = [ can(fd) for fd in f.__defaults__ ]
else:
self.defaults = None
closure = py3compat.get_closure(f)
if closure:
self.closure = tuple( can(cell) for cell in closure )
else:
self.closure = None
self.module = f.__module__ or '__main__'
self.__name__ = f.__name__
self.buffers = []
def _check_type(self, obj):
assert isinstance(obj, FunctionType), "Not a function type"
def get_object(self, g=None):
# try to load function back into its module:
if not self.module.startswith('__'):
__import__(self.module)
g = sys.modules[self.module].__dict__
if g is None:
g = {}
if self.defaults:
defaults = tuple(uncan(cfd, g) for cfd in self.defaults)
else:
defaults = None
if self.closure:
closure = tuple(uncan(cell, g) for cell in self.closure)
else:
closure = None
newFunc = FunctionType(self.code, g, self.__name__, defaults, closure)
return newFunc
class CannedClass(CannedObject):
def __init__(self, cls):
self._check_type(cls)
self.name = cls.__name__
self.old_style = not isinstance(cls, type)
self._canned_dict = {}
for k,v in cls.__dict__.items():
if k not in ('__weakref__', '__dict__'):
self._canned_dict[k] = can(v)
if self.old_style:
mro = []
else:
mro = cls.mro()
self.parents = [ can(c) for c in mro[1:] ]
self.buffers = []
def _check_type(self, obj):
assert isinstance(obj, class_type), "Not a class type"
def get_object(self, g=None):
parents = tuple(uncan(p, g) for p in self.parents)
return type(self.name, parents, uncan_dict(self._canned_dict, g=g))
class CannedArray(CannedObject):
def __init__(self, obj):
from numpy import ascontiguousarray
self.shape = obj.shape
self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str
self.pickled = False
if sum(obj.shape) == 0:
self.pickled = True
elif obj.dtype == 'O':
# can't handle object dtype with buffer approach
self.pickled = True
elif obj.dtype.fields and any(dt == 'O' for dt,sz in obj.dtype.fields.values()):
self.pickled = True
if self.pickled:
# just pickle it
self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)]
else:
# ensure contiguous
obj = ascontiguousarray(obj, dtype=None)
self.buffers = [buffer(obj)]
def get_object(self, g=None):
from numpy import frombuffer
data = self.buffers[0]
if self.pickled:
# we just pickled it
return pickle.loads(buffer_to_bytes_py2(data))
else:
if not py3compat.PY3 and isinstance(data, memoryview):
# frombuffer doesn't accept memoryviews on Python 2,
# so cast to old-style buffer
data = buffer(data.tobytes())
return frombuffer(data, dtype=self.dtype).reshape(self.shape)
class CannedBytes(CannedObject):
wrap = staticmethod(buffer_to_bytes)
def __init__(self, obj):
self.buffers = [obj]
def get_object(self, g=None):
data = self.buffers[0]
return self.wrap(data)
class CannedBuffer(CannedBytes):
wrap = buffer
class CannedMemoryView(CannedBytes):
wrap = memoryview
#-------------------------------------------------------------------------------
# Functions
#-------------------------------------------------------------------------------
def _import_mapping(mapping, original=None):
"""import any string-keys in a type mapping
"""
log = get_logger()
log.debug("Importing canning map")
for key,value in list(mapping.items()):
if isinstance(key, string_types):
try:
cls = import_item(key)
except Exception:
if original and key not in original:
# only message on user-added classes
log.error("canning class not importable: %r", key, exc_info=True)
mapping.pop(key)
else:
mapping[cls] = mapping.pop(key)
def istype(obj, check):
"""like isinstance(obj, check), but strict
This won't catch subclasses.
"""
if isinstance(check, tuple):
for cls in check:
if type(obj) is cls:
return True
return False
else:
return type(obj) is check
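# Example of the strictness (bool is a subclass of int):
#
#     istype(True, int)    # False -- isinstance(True, int) would be True
#     istype(True, bool)   # True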
def can(obj):
"""prepare an object for pickling"""
import_needed = False
for cls,canner in iteritems(can_map):
if isinstance(cls, string_types):
import_needed = True
break
elif istype(obj, cls):
return canner(obj)
if import_needed:
# perform can_map imports, then try again
# this will usually only happen once
_import_mapping(can_map, _original_can_map)
return can(obj)
return obj
def can_class(obj):
if isinstance(obj, class_type) and obj.__module__ == '__main__':
return CannedClass(obj)
else:
return obj
def can_dict(obj):
"""can the *values* of a dict"""
if istype(obj, dict):
newobj = {}
for k, v in iteritems(obj):
newobj[k] = can(v)
return newobj
else:
return obj
sequence_types = (list, tuple, set)
def can_sequence(obj):
"""can the elements of a sequence"""
if istype(obj, sequence_types):
t = type(obj)
return t([can(i) for i in obj])
else:
return obj
def uncan(obj, g=None):
"""invert canning"""
import_needed = False
for cls,uncanner in iteritems(uncan_map):
if isinstance(cls, string_types):
import_needed = True
break
elif isinstance(obj, cls):
return uncanner(obj, g)
if import_needed:
# perform uncan_map imports, then try again
# this will usually only happen once
_import_mapping(uncan_map, _original_uncan_map)
return uncan(obj, g)
return obj
def uncan_dict(obj, g=None):
if istype(obj, dict):
newobj = {}
for k, v in iteritems(obj):
newobj[k] = uncan(v,g)
return newobj
else:
return obj
def uncan_sequence(obj, g=None):
if istype(obj, sequence_types):
t = type(obj)
return t([uncan(i,g) for i in obj])
else:
return obj
#-------------------------------------------------------------------------------
# API dictionaries
#-------------------------------------------------------------------------------
# These dicts can be extended for custom serialization of new objects
can_map = {
'numpy.ndarray' : CannedArray,
FunctionType : CannedFunction,
bytes : CannedBytes,
memoryview : CannedMemoryView,
cell_type : CannedCell,
class_type : can_class,
}
if buffer is not memoryview:
can_map[buffer] = CannedBuffer
uncan_map = {
CannedObject : lambda obj, g: obj.get_object(g),
dict : uncan_dict,
}
# for use in _import_mapping:
_original_can_map = can_map.copy()
_original_uncan_map = uncan_map.copy()
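# Extension sketch (hypothetical _ExamplePoint names): registering a custom
# type needs only a can_map entry, because uncan() dispatches by isinstance
# and the CannedObject entry in uncan_map already routes every CannedObject
# subclass through get_object(). String keys such as 'mymod.Point' are also
# accepted and resolved lazily by _import_mapping on first use.
class _ExamplePoint(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
class _CannedExamplePoint(CannedObject):
    def __init__(self, obj):
        self.buffers = []
        self.xy = (obj.x, obj.y)
    def get_object(self, g=None):
        return _ExamplePoint(*self.xy)
# can_map[_ExamplePoint] = _CannedExamplePoint  # uncomment to register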

View file

@ -0,0 +1,200 @@
"""A matplotlib backend for publishing figures via display_data"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import matplotlib
from matplotlib.backends.backend_agg import (
new_figure_manager,
FigureCanvasAgg,
new_figure_manager_given_figure,
) # analysis: ignore
from matplotlib import colors
from matplotlib._pylab_helpers import Gcf
from IPython.core.getipython import get_ipython
from IPython.display import display
from .config import InlineBackend
def show(close=None, block=None):
"""Show all figures as SVG/PNG payloads sent to the IPython clients.
Parameters
----------
close : bool, optional
If true, a ``plt.close('all')`` call is automatically issued after
        sending all the figures. If this is set, the figures will be entirely
        removed from the internal list of figures.
block : Not used.
The `block` parameter is a Matplotlib experimental parameter.
We accept it in the function signature for compatibility with other
backends.
"""
if close is None:
close = InlineBackend.instance().close_figures
try:
for figure_manager in Gcf.get_all_fig_managers():
display(
figure_manager.canvas.figure,
metadata=_fetch_figure_metadata(figure_manager.canvas.figure)
)
finally:
show._to_draw = []
# only call close('all') if any to close
# close triggers gc.collect, which can be slow
if close and Gcf.get_all_fig_managers():
matplotlib.pyplot.close('all')
# This flag will be reset by draw_if_interactive when called
show._draw_called = False
# list of figures to draw when flush_figures is called
show._to_draw = []
def draw_if_interactive():
"""
    Called after every pylab drawing command.
"""
# signal that the current active figure should be sent at the end of
# execution. Also sets the _draw_called flag, signaling that there will be
# something to send. At the end of the code execution, a separate call to
# flush_figures() will act upon these values
manager = Gcf.get_active()
if manager is None:
return
fig = manager.canvas.figure
    # Hack: matplotlib FigureManager objects in interactive backends (at least
# in some of them) monkeypatch the figure object and add a .show() method
# to it. This applies the same monkeypatch in order to support user code
# that might expect `.show()` to be part of the official API of figure
# objects.
# For further reference:
# https://github.com/ipython/ipython/issues/1612
# https://github.com/matplotlib/matplotlib/issues/835
if not hasattr(fig, 'show'):
# Queue up `fig` for display
fig.show = lambda *a: display(fig, metadata=_fetch_figure_metadata(fig))
# If matplotlib was manually set to non-interactive mode, this function
# should be a no-op (otherwise we'll generate duplicate plots, since a user
# who set ioff() manually expects to make separate draw/show calls).
if not matplotlib.is_interactive():
return
# ensure current figure will be drawn, and each subsequent call
# of draw_if_interactive() moves the active figure to ensure it is
# drawn last
try:
show._to_draw.remove(fig)
except ValueError:
# ensure it only appears in the draw list once
pass
# Queue up the figure for drawing in next show() call
show._to_draw.append(fig)
show._draw_called = True
def flush_figures():
"""Send all figures that changed
    This is meant to be called automatically and will call show() if, during
    prior code execution, there were any calls to draw_if_interactive.
This function is meant to be used as a post_execute callback in IPython,
so user-caused errors are handled with showtraceback() instead of being
    allowed to raise. If this function is not called from within IPython,
    then these exceptions will propagate.
"""
if not show._draw_called:
return
if InlineBackend.instance().close_figures:
# ignore the tracking, just draw and close all figures
try:
return show(True)
except Exception as e:
# safely show traceback if in IPython, else raise
ip = get_ipython()
if ip is None:
raise e
else:
ip.showtraceback()
return
try:
# exclude any figures that were closed:
active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])
for fig in [ fig for fig in show._to_draw if fig in active ]:
try:
display(fig, metadata=_fetch_figure_metadata(fig))
except Exception as e:
# safely show traceback if in IPython, else raise
ip = get_ipython()
if ip is None:
raise e
else:
ip.showtraceback()
return
finally:
# clear flags for next round
show._to_draw = []
show._draw_called = False
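# Wiring sketch: IPython's inline-backend setup registers flush_figures as a
# post_execute callback, roughly equivalent to:
#
#     ip = get_ipython()
#     if ip is not None:
#         ip.events.register('post_execute', flush_figures)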
# Changes to matplotlib in version 1.2 require an mpl backend to supply a default
# figure canvas. This is set here to the Agg canvas
# See https://github.com/matplotlib/matplotlib/pull/1125
FigureCanvas = FigureCanvasAgg
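# Activation sketch (module path assumed from this file's install location):
# selecting this module as the matplotlib backend is what makes the hook
# below fire.
#
#     import matplotlib
#     matplotlib.use('module://ipykernel.pylab.backend_inline')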
def _enable_matplotlib_integration():
"""Enable extra IPython matplotlib integration when we are loaded as the matplotlib backend."""
from matplotlib import get_backend
ip = get_ipython()
backend = get_backend()
if ip and backend == 'module://%s' % __name__:
from IPython.core.pylabtools import configure_inline_support, activate_matplotlib
try:
activate_matplotlib(backend)
configure_inline_support(ip, backend)
except (ImportError, AttributeError):
# bugs may cause a circular import on Python 2
def configure_once(*args):
activate_matplotlib(backend)
configure_inline_support(ip, backend)
ip.events.unregister('post_run_cell', configure_once)
ip.events.register('post_run_cell', configure_once)
_enable_matplotlib_integration()
def _fetch_figure_metadata(fig):
"""Get some metadata to help with displaying a figure."""
# determine if a background is needed for legibility
if _is_transparent(fig.get_facecolor()):
# the background is transparent
ticksLight = _is_light([label.get_color()
for axes in fig.axes
for axis in (axes.xaxis, axes.yaxis)
for label in axis.get_ticklabels()])
if ticksLight.size and (ticksLight == ticksLight[0]).all():
# there are one or more tick labels, all with the same lightness
return {'needs_background': 'dark' if ticksLight[0] else 'light'}
return None
def _is_light(color):
"""Determines if a color (or each of a sequence of colors) is light (as
    opposed to dark). Based on the ITU BT.601 luminance formula (see
https://stackoverflow.com/a/596241)."""
rgbaArr = colors.to_rgba_array(color)
return rgbaArr[:,:3].dot((.299, .587, .114)) > .5
def _is_transparent(color):
"""Determine transparency from alpha."""
rgba = colors.to_rgba(color)
return rgba[3] < .5
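# Worked examples for the two predicates above (the BT.601 weights sum to 1):
#
#     _is_light('white')                -> array([True])   # .299+.587+.114 = 1.0 > .5
#     _is_light('red')                  -> array([False])  # .299 <= .5
#     _is_transparent((0, 0, 0, .25))   -> True            # alpha .25 < .5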

View file

@ -0,0 +1,110 @@
"""Configurable for configuring the IPython inline backend
This module does not import anything from matplotlib.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from traitlets.config.configurable import SingletonConfigurable
from traitlets import (
Dict, Instance, Set, Bool, TraitError, Unicode
)
#-----------------------------------------------------------------------------
# Configurable for inline backend options
#-----------------------------------------------------------------------------
def pil_available():
"""Test if PIL/Pillow is available"""
    try:
        from PIL import Image  # noqa: F401 -- the import itself is the check
    except ImportError:
        return False
    return True
# inherit from InlineBackendConfig for deprecation purposes
class InlineBackendConfig(SingletonConfigurable):
pass
class InlineBackend(InlineBackendConfig):
"""An object to store configuration of the inline backend."""
# The typical default figure size is too large for inline use,
# so we shrink the figure size to 6x4, and tweak fonts to
# make that fit.
rc = Dict({'figure.figsize': (6.0,4.0),
# play nicely with white background in the Qt and notebook frontend
'figure.facecolor': (1,1,1,0),
'figure.edgecolor': (1,1,1,0),
               # 12pt labels get cut off on 6x4 logplots, so use 10pt.
'font.size': 10,
# 72 dpi matches SVG/qtconsole
# this only affects PNG export, as SVG has no dpi setting
'figure.dpi': 72,
# 10pt still needs a little more room on the xlabel:
'figure.subplot.bottom' : .125
},
help="""Subset of matplotlib rcParams that should be different for the
inline backend."""
).tag(config=True)
figure_formats = Set({'png'},
help="""A set of figure formats to enable: 'png',
'retina', 'jpeg', 'svg', 'pdf'.""").tag(config=True)
def _update_figure_formatters(self):
if self.shell is not None:
from IPython.core.pylabtools import select_figure_formats
select_figure_formats(self.shell, self.figure_formats, **self.print_figure_kwargs)
def _figure_formats_changed(self, name, old, new):
if 'jpg' in new or 'jpeg' in new:
if not pil_available():
raise TraitError("Requires PIL/Pillow for JPG figures")
self._update_figure_formatters()
    figure_format = Unicode(help="""The figure format to enable (deprecated;
        use `figure_formats` instead)""").tag(config=True)
def _figure_format_changed(self, name, old, new):
if new:
self.figure_formats = {new}
print_figure_kwargs = Dict({'bbox_inches' : 'tight'},
help="""Extra kwargs to be passed to fig.canvas.print_figure.
        Typical examples include: bbox_inches, quality (for jpeg figures), etc.
"""
).tag(config=True)
_print_figure_kwargs_changed = _update_figure_formatters
close_figures = Bool(True,
help="""Close all figures at the end of each cell.
When True, ensures that each cell starts with no active figures, but it
also means that one must keep track of references in order to edit or
redraw figures in subsequent cells. This mode is ideal for the notebook,
where residual plots from other cells might be surprising.
When False, one must call figure() to create new figures. This means
that gcf() and getfigs() can reference figures created in other cells,
and the active figure can continue to be edited with pylab/pyplot
methods that reference the current active figure. This mode facilitates
iterative editing of figures, and behaves most consistently with
other matplotlib backends, but figure barriers between cells must
be explicit.
""").tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
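# Configuration sketch, e.g. in ipython_kernel_config.py (standard traitlets
# config-file syntax; the values shown are illustrative):
#
#     c = get_config()
#     c.InlineBackend.figure_formats = {'png', 'svg'}
#     c.InlineBackend.rc = {'figure.figsize': (8.0, 5.0)}
#     c.InlineBackend.close_figures = False  # keep figures editable across cells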

Binary file not shown (added binary image, 1.1 KiB)

Binary file not shown (added binary image, 2.1 KiB)

View file

@ -0,0 +1,175 @@
"""serialization utilities for apply messages"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import warnings
warnings.warn("ipykernel.serialize is deprecated. It has moved to ipyparallel.serialize", DeprecationWarning)
import pickle
from itertools import chain
from ipykernel.pickleutil import (
can, uncan, can_sequence, uncan_sequence, CannedObject,
istype, sequence_types, PICKLE_PROTOCOL,
)
from jupyter_client.session import MAX_ITEMS, MAX_BYTES
#-----------------------------------------------------------------------------
# Serialization Functions
#-----------------------------------------------------------------------------
def _extract_buffers(obj, threshold=MAX_BYTES):
"""extract buffers larger than a certain threshold"""
buffers = []
if isinstance(obj, CannedObject) and obj.buffers:
for i,buf in enumerate(obj.buffers):
if len(buf) > threshold:
# buffer larger than threshold, prevent pickling
obj.buffers[i] = None
buffers.append(buf)
# buffer too small for separate send, coerce to bytes
# because pickling buffer objects just results in broken pointers
elif isinstance(buf, memoryview):
obj.buffers[i] = buf.tobytes()
return buffers
def _restore_buffers(obj, buffers):
"""restore buffers extracted by """
if isinstance(obj, CannedObject) and obj.buffers:
for i,buf in enumerate(obj.buffers):
if buf is None:
obj.buffers[i] = buffers.pop(0)
def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
"""Serialize an object into a list of sendable buffers.
Parameters
----------
obj : object
The object to be serialized
buffer_threshold : int
The threshold (in bytes) for pulling out data buffers
to avoid pickling them.
item_threshold : int
The maximum number of items over which canning will iterate.
Containers (lists, dicts) larger than this will be pickled without
introspection.
Returns
-------
[bufs] : list of buffers representing the serialized object.
"""
buffers = []
if istype(obj, sequence_types) and len(obj) < item_threshold:
cobj = can_sequence(obj)
for c in cobj:
buffers.extend(_extract_buffers(c, buffer_threshold))
elif istype(obj, dict) and len(obj) < item_threshold:
cobj = {}
for k in sorted(obj):
c = can(obj[k])
buffers.extend(_extract_buffers(c, buffer_threshold))
cobj[k] = c
else:
cobj = can(obj)
buffers.extend(_extract_buffers(cobj, buffer_threshold))
buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL))
return buffers
def deserialize_object(buffers, g=None):
"""reconstruct an object serialized by serialize_object from data buffers.
Parameters
----------
    buffers : list of buffers/bytes
g : globals to be used when uncanning
Returns
-------
(newobj, bufs) : unpacked object, and the list of remaining unused buffers.
"""
bufs = list(buffers)
pobj = bufs.pop(0)
canned = pickle.loads(pobj)
if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
for c in canned:
_restore_buffers(c, bufs)
newobj = uncan_sequence(canned, g)
elif istype(canned, dict) and len(canned) < MAX_ITEMS:
newobj = {}
for k in sorted(canned):
c = canned[k]
_restore_buffers(c, bufs)
newobj[k] = uncan(c, g)
else:
_restore_buffers(canned, bufs)
newobj = uncan(canned, g)
return newobj, bufs
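# Round-trip sketch (example helper; requires numpy): an array bigger than
# buffer_threshold is pulled out of the pickle frame as a raw buffer on the
# way out and reattached on the way back in.
def _serialize_roundtrip_example():
    from numpy import zeros
    big = zeros(MAX_BYTES + 1, dtype='uint8')         # exceeds the threshold
    bufs = serialize_object({'big': big, 'n': 42})    # [pickle frame, raw buffer]
    new, leftover = deserialize_object(bufs)
    assert new['n'] == 42 and (new['big'] == big).all() and leftover == []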
def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):
"""pack up a function, args, and kwargs to be sent over the wire
Each element of args/kwargs will be canned for special treatment,
but inspection will not go any deeper than that.
    Any object whose data is larger than `buffer_threshold` will not have its
    data copied (only numpy arrays and bytes/buffers support zero-copy).
Message will be a list of bytes/buffers of the format:
[ cf, pinfo, <arg_bufs>, <kwarg_bufs> ]
With length at least two + len(args) + len(kwargs)
"""
arg_bufs = list(chain.from_iterable(
serialize_object(arg, buffer_threshold, item_threshold) for arg in args))
kw_keys = sorted(kwargs.keys())
kwarg_bufs = list(chain.from_iterable(
serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys))
info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys)
msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)]
msg.append(pickle.dumps(info, PICKLE_PROTOCOL))
msg.extend(arg_bufs)
msg.extend(kwarg_bufs)
return msg
def unpack_apply_message(bufs, g=None, copy=True):
"""unpack f,args,kwargs from buffers packed by pack_apply_message()
Returns: original f,args,kwargs"""
bufs = list(bufs) # allow us to pop
assert len(bufs) >= 2, "not enough buffers!"
pf = bufs.pop(0)
f = uncan(pickle.loads(pf), g)
pinfo = bufs.pop(0)
info = pickle.loads(pinfo)
arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:]
args = []
for i in range(info['nargs']):
arg, arg_bufs = deserialize_object(arg_bufs, g)
args.append(arg)
args = tuple(args)
assert not arg_bufs, "Shouldn't be any arg bufs left over"
kwargs = {}
for key in info['kw_keys']:
kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)
kwargs[key] = kwarg
assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"
return f,args,kwargs
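# End-to-end sketch (example helper): pack a call the way a client would,
# then unpack and apply it the way an engine would.
def _apply_message_example():
    def add(a, b=1):
        return a + b
    msg = pack_apply_message(add, (2,), {'b': 3})
    f, args, kwargs = unpack_apply_message(msg)
    assert f(*args, **kwargs) == 5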

View file

@ -0,0 +1,49 @@
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import sys
import tempfile
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from jupyter_core import paths as jpaths
from IPython import paths as ipaths
from ipykernel.kernelspec import install
pjoin = os.path.join
tmp = None
patchers = []
def setup():
"""setup temporary env for tests"""
global tmp
tmp = tempfile.mkdtemp()
patchers[:] = [
patch.dict(os.environ, {
'HOME': tmp,
# Let tests work with --user install when HOME is changed:
'PYTHONPATH': os.pathsep.join(sys.path),
}),
]
for p in patchers:
p.start()
    # install the IPython kernelspec in the temp home:
install(user=True)
def teardown():
for p in patchers:
p.stop()
try:
shutil.rmtree(tmp)
except (OSError, IOError):
# no such file
pass

Some files were not shown because too many files have changed in this diff.