Uploaded Test files

This commit is contained in:
Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions

View file

@ -0,0 +1,40 @@
"""
prompt_toolkit
==============
Author: Jonathan Slenders
Description: prompt_toolkit is a Library for building powerful interactive
command lines in Python. It can be a replacement for GNU
Readline, but it can be much more than that.
See the examples directory to learn about the usage.
Probably, to get started, you might also want to have a look at
`prompt_toolkit.shortcuts.prompt`.
"""
from .application import Application
from .formatted_text import ANSI, HTML
from .shortcuts import PromptSession, print_formatted_text, prompt
# Don't forget to update in `docs/conf.py`!
__version__ = "3.0.8"
# Version tuple.
VERSION = tuple(__version__.split("."))
__all__ = [
# Application.
"Application",
# Shortcuts.
"prompt",
"PromptSession",
"print_formatted_text",
# Formatted text.
"HTML",
"ANSI",
# Version info.
"__version__",
"VERSION",
]

View file

@ -0,0 +1,28 @@
from .application import Application
from .current import (
AppSession,
create_app_session,
get_app,
get_app_or_none,
get_app_session,
set_app,
)
from .dummy import DummyApplication
from .run_in_terminal import in_terminal, run_in_terminal
__all__ = [
# Application.
"Application",
# Current.
"AppSession",
"get_app_session",
"create_app_session",
"get_app",
"get_app_or_none",
"set_app",
# Dummy.
"DummyApplication",
# Run_in_terminal
"in_terminal",
"run_in_terminal",
]

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,170 @@
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Generator, Optional
try:
from contextvars import ContextVar
except ImportError:
from prompt_toolkit.eventloop.dummy_contextvars import ContextVar # type: ignore
if TYPE_CHECKING:
from prompt_toolkit.input.defaults import Input
from prompt_toolkit.output.defaults import Output
from .application import Application
__all__ = [
"AppSession",
"get_app_session",
"get_app",
"get_app_or_none",
"set_app",
"create_app_session",
]
class AppSession:
    """
    One interactive session, usually attached to a single terminal.

    Several applications can run (one after another) within the same session,
    but the input/output device never changes while the session exists.

    Warning: always go through `create_app_session` to create an instance, so
    that the session gets activated correctly.

    :param input: Default input for every application running in this
        session, unless one is passed to the `Application` explicitly.
    :param output: Default output, likewise.
    """

    def __init__(
        self, input: Optional["Input"] = None, output: Optional["Output"] = None
    ) -> None:
        self._input = input
        self._output = output

        # Assigned dynamically by the `set_app` context manager while an
        # application is running inside this session.
        self.app: Optional["Application[Any]"] = None

    def __repr__(self) -> str:
        return f"AppSession(app={self.app!r})"

    @property
    def input(self) -> "Input":
        # Create the default input lazily, on first access.
        if self._input is None:
            from prompt_toolkit.input.defaults import create_input

            self._input = create_input()
        return self._input

    @property
    def output(self) -> "Output":
        # Create the default output lazily, on first access.
        if self._output is None:
            from prompt_toolkit.output.defaults import create_output

            self._output = create_output()
        return self._output
# Context variable holding the `AppSession` for the current context
# (thread / asyncio task). A default session is created eagerly, so that
# `get_app_session` always returns a usable session, even when
# `create_app_session` was never called.
_current_app_session: ContextVar["AppSession"] = ContextVar(
    "_current_app_session", default=AppSession()
)
def get_app_session() -> AppSession:
    """
    Return the `AppSession` that is active in the current context.
    """
    return _current_app_session.get()
def get_app() -> "Application[Any]":
    """
    Return the currently active (running) Application.

    An :class:`.Application` counts as active during its
    :meth:`.Application.run_async` call. Only one application can be active
    at a time per session (one terminal window, one stdin/stdout), which
    keeps the code much simpler than threading the application object
    through every call site.

    When nothing is running, a :class:`.DummyApplication` is returned rather
    than raising, so callers never have to special-case the "no app"
    situation. (Programs like pymux, which juggle multiple applications, use
    their own work-around on top of this.)
    """
    app = _current_app_session.get().app
    if app is None:
        from .dummy import DummyApplication

        return DummyApplication()
    return app
def get_app_or_none() -> Optional["Application[Any]"]:
    """
    Return the currently running `Application`, or `None` when no
    application is running.
    """
    return _current_app_session.get().app
@contextmanager
def set_app(app: "Application[Any]") -> Generator[None, None, None]:
    """
    Make the given :class:`.Application` the active one in the current
    `AppSession`, restoring the previous application on exit.

    This should only be called by the `Application` itself. The application
    is automatically active while it runs; to make it active in other
    threads/coroutines, use `contextvars.copy_context()` or
    `Application.context`.
    """
    session = _current_app_session.get()
    saved_app = session.app

    session.app = app
    try:
        yield
    finally:
        session.app = saved_app
@contextmanager
def create_app_session(
    input: Optional["Input"] = None, output: Optional["Output"] = None
) -> Generator[AppSession, None, None]:
    """
    Create and activate a separate `AppSession`.

    This is useful if there can be multiple individual `AppSession`s going
    on, like in the case of a Telnet/SSH server. This functionality uses
    contextvars and requires at least Python 3.7.

    :param input: Input for the new session; defaults to the current
        session's input.
    :param output: Output for the new session; defaults to the current
        session's output.
    """
    # Bug fix: the original guard was `sys.version_info <= (3, 6)`, which is
    # False for every real 3.6.x release (e.g. (3, 6, 5) > (3, 6)), so it
    # never fired. Compare against (3, 7) instead.
    if sys.version_info < (3, 7):
        raise RuntimeError("Application sessions require Python 3.7.")

    # If no input/output is specified, fall back to the current input/output,
    # whatever that is.
    if input is None:
        input = get_app_session().input
    if output is None:
        output = get_app_session().output

    # Create new `AppSession` and activate it in the current context.
    session = AppSession(input=input, output=output)

    token = _current_app_session.set(session)
    try:
        yield session
    finally:
        # Restore the previously active session.
        _current_app_session.reset(token)

View file

@ -0,0 +1,47 @@
from typing import Callable, Optional
from prompt_toolkit.formatted_text import AnyFormattedText
from prompt_toolkit.input import DummyInput
from prompt_toolkit.output import DummyOutput
from .application import Application
__all__ = [
"DummyApplication",
]
class DummyApplication(Application[None]):
    """
    When no :class:`.Application` is running,
    :func:`.get_app` will run an instance of this :class:`.DummyApplication` instead.
    Every run/suspend entry point raises, because a dummy application is a
    placeholder and must never actually be executed.
    """
    def __init__(self) -> None:
        # A dummy application talks to dummy input/output devices only.
        super().__init__(output=DummyOutput(), input=DummyInput())
    def run(
        self,
        pre_run: Optional[Callable[[], None]] = None,
        set_exception_handler: bool = True,
    ) -> None:
        raise NotImplementedError("A DummyApplication is not supposed to run.")
    async def run_async(
        self,
        pre_run: Optional[Callable[[], None]] = None,
        set_exception_handler: bool = True,
    ) -> None:
        raise NotImplementedError("A DummyApplication is not supposed to run.")
    async def run_system_command(
        self,
        command: str,
        wait_for_enter: bool = True,
        display_before_text: AnyFormattedText = "",
        wait_text: str = "",
    ) -> None:
        raise NotImplementedError
    def suspend_to_background(self, suspend_group: bool = True) -> None:
        raise NotImplementedError

View file

@ -0,0 +1,116 @@
"""
Tools for running functions on the terminal above the current application or prompt.
"""
from asyncio import Future, ensure_future
from typing import AsyncGenerator, Awaitable, Callable, TypeVar
from prompt_toolkit.eventloop import run_in_executor_with_context
from .current import get_app_or_none
try:
from contextlib import asynccontextmanager # type: ignore
except ImportError:
from prompt_toolkit.eventloop.async_context_manager import asynccontextmanager
__all__ = [
"run_in_terminal",
"in_terminal",
]
_T = TypeVar("_T")
def run_in_terminal(
    func: Callable[[], _T], render_cli_done: bool = False, in_executor: bool = False
) -> Awaitable[_T]:
    """
    Execute ``func`` on the terminal, above the current application or prompt.

    The prompt is hidden first, then the callable runs (it can safely write
    to the terminal), and finally the prompt is rendered again, so the
    function's output scrolls above the prompt.

    ``func`` must be synchronous. For asynchronous work, use the
    ``in_terminal`` context manager directly.

    :param func: The callable to execute.
    :param render_cli_done: When True, render the interface in the
        'Done' state first, then execute the function. If False,
        erase the interface first.
    :param in_executor: When True, run in executor. (Use this for long
        blocking functions, when you don't want to block the event loop.)
    :returns: A `Future` resolving to the function's result.
    """

    async def _body() -> _T:
        async with in_terminal(render_cli_done=render_cli_done):
            if not in_executor:
                return func()
            return await run_in_executor_with_context(func)

    return ensure_future(_body())
@asynccontextmanager
async def in_terminal(render_cli_done: bool = False) -> AsyncGenerator[None, None]:
    """
    Asynchronous context manager that suspends the current application and runs
    the body in the terminal.
    .. code::
        async def f():
            async with in_terminal():
                call_some_function()
                await call_some_async_function()
    :param render_cli_done: When True, render the interface in the 'Done'
        state before suspending; otherwise erase it.
    """
    app = get_app_or_none()
    # Nothing to suspend when no application is running: just run the body.
    if app is None or not app._is_running:
        yield
        return
    # When a previous `run_in_terminal` call was in progress. Wait for that
    # to finish, before starting this one. Chain to previous call.
    previous_run_in_terminal_f = app._running_in_terminal_f
    new_run_in_terminal_f: Future[None] = Future()
    app._running_in_terminal_f = new_run_in_terminal_f
    # Wait for the previous `run_in_terminal` to finish.
    if previous_run_in_terminal_f is not None:
        await previous_run_in_terminal_f
    # Wait for all CPRs to arrive. We don't want to detach the input until
    # all cursor position responses have been arrived. Otherwise, the tty
    # will echo its input and can show stuff like ^[[39;1R.
    if app.output.responds_to_cpr:
        await app.renderer.wait_for_cpr_responses()
    # Draw interface in 'done' state, or erase.
    if render_cli_done:
        app._redraw(render_as_done=True)
    else:
        app.renderer.erase()
    # Disable rendering.
    app._running_in_terminal = True
    # Detach input (and restore the terminal's cooked mode) while the body
    # runs, so it can interact with the tty directly.
    try:
        with app.input.detach():
            with app.input.cooked_mode():
                yield
    finally:
        # Redraw interface again.
        try:
            app._running_in_terminal = False
            app.renderer.reset()
            app._request_absolute_cursor_position()
            app._redraw()
        finally:
            # Unblock the next chained `in_terminal` call, even if the
            # redraw above failed.
            new_run_in_terminal_f.set_result(None)

View file

@ -0,0 +1,187 @@
"""
`Fish-style <http://fishshell.com/>`_ like auto-suggestion.
While a user types input in a certain buffer, suggestions are generated
(asynchronously.) Usually, they are displayed after the input. When the cursor
presses the right arrow and the cursor is at the end of the input, the
suggestion will be inserted.
If you want the auto suggestions to be asynchronous (in a background thread),
because they take too much time, and could potentially block the event loop,
then wrap the :class:`.AutoSuggest` instance into a
:class:`.ThreadedAutoSuggest`.
"""
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Callable, Optional, Union
from prompt_toolkit.eventloop import run_in_executor_with_context
from .document import Document
from .filters import Filter, to_filter
if TYPE_CHECKING:
from .buffer import Buffer
__all__ = [
"Suggestion",
"AutoSuggest",
"ThreadedAutoSuggest",
"DummyAutoSuggest",
"AutoSuggestFromHistory",
"ConditionalAutoSuggest",
"DynamicAutoSuggest",
]
class Suggestion:
    """
    A single suggestion, as produced by an auto-suggest algorithm.

    :param text: The suggestion text.
    """

    def __init__(self, text: str) -> None:
        self.text = text

    def __repr__(self) -> str:
        return f"Suggestion({self.text})"
class AutoSuggest(metaclass=ABCMeta):
    """
    Base class for auto suggestion implementations.
    Subclasses implement :meth:`get_suggestion`. The asynchronous variant
    :meth:`get_suggestion_async` defaults to calling the synchronous one and
    only needs overriding for genuinely asynchronous implementations (see
    :class:`ThreadedAutoSuggest`).
    """
    @abstractmethod
    def get_suggestion(
        self, buffer: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        """
        Return `None` or a :class:`.Suggestion` instance.
        We receive both :class:`~prompt_toolkit.buffer.Buffer` and
        :class:`~prompt_toolkit.document.Document`. The reason is that auto
        suggestions are retrieved asynchronously. (Like completions.) The
        buffer text could be changed in the meantime, but ``document`` contains
        the buffer document like it was at the start of the auto suggestion
        call. So, from here, don't access ``buffer.text``, but use
        ``document.text`` instead.
        :param buffer: The :class:`~prompt_toolkit.buffer.Buffer` instance.
        :param document: The :class:`~prompt_toolkit.document.Document` instance.
        """
    async def get_suggestion_async(
        self, buff: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        """
        Return a :class:`.Suggestion` (or `None`) once it is ready.
        This default implementation simply calls the synchronous
        :meth:`get_suggestion`; overload it in order to provide an
        asynchronous implementation.
        """
        return self.get_suggestion(buff, document)
class ThreadedAutoSuggest(AutoSuggest):
    """
    Wrapper that off-loads suggestion generation to a background thread.

    (Use this to prevent the user interface from becoming unresponsive when
    generating suggestions takes too much time.)
    """

    def __init__(self, auto_suggest: AutoSuggest) -> None:
        self.auto_suggest = auto_suggest

    def get_suggestion(
        self, buff: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        # Synchronous path: delegate directly.
        return self.auto_suggest.get_suggestion(buff, document)

    async def get_suggestion_async(
        self, buff: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        """
        Run the `get_suggestion` function in a worker thread.
        """
        return await run_in_executor_with_context(
            lambda: self.get_suggestion(buff, document)
        )
class DummyAutoSuggest(AutoSuggest):
    """
    AutoSuggest class that doesn't return any suggestion.
    Used as the fall-back whenever no real auto-suggest is available.
    """
    def get_suggestion(
        self, buffer: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        return None  # No suggestion
class AutoSuggestFromHistory(AutoSuggest):
    """
    Suggest the remainder of the most recent history line that starts with
    the text the user has typed so far.
    """

    def get_suggestion(
        self, buffer: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        # Consider only the last line for the suggestion.
        text = document.text.rsplit("\n", 1)[-1]

        # No suggestion for an empty (or whitespace-only) line.
        if not text.strip():
            return None

        # Scan history from newest to oldest; within each entry, scan its
        # lines from last to first, and suggest the tail of the first match.
        for entry in reversed(list(buffer.history.get_strings())):
            for line in reversed(entry.splitlines()):
                if line.startswith(text):
                    return Suggestion(line[len(text) :])
        return None
class ConditionalAutoSuggest(AutoSuggest):
    """
    Auto-suggest wrapper that is only active while the given filter holds.
    """

    def __init__(self, auto_suggest: AutoSuggest, filter: Union[bool, Filter]) -> None:
        self.auto_suggest = auto_suggest
        self.filter = to_filter(filter)

    def get_suggestion(
        self, buffer: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        # When the condition doesn't hold, suggest nothing.
        if not self.filter():
            return None
        return self.auto_suggest.get_suggestion(buffer, document)
class DynamicAutoSuggest(AutoSuggest):
    """
    AutoSuggest class that can dynamically return any `AutoSuggest` instance.
    The delegate is looked up on every call, so it may change over time.
    :param get_auto_suggest: Callable that returns an :class:`.AutoSuggest`
        instance, or `None` (in which case a `DummyAutoSuggest` is used).
    """
    def __init__(self, get_auto_suggest: Callable[[], Optional[AutoSuggest]]) -> None:
        self.get_auto_suggest = get_auto_suggest
    def get_suggestion(
        self, buff: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        auto_suggest = self.get_auto_suggest() or DummyAutoSuggest()
        return auto_suggest.get_suggestion(buff, document)
    async def get_suggestion_async(
        self, buff: "Buffer", document: Document
    ) -> Optional[Suggestion]:
        auto_suggest = self.get_auto_suggest() or DummyAutoSuggest()
        return await auto_suggest.get_suggestion_async(buff, document)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,125 @@
from collections import deque
from functools import wraps
from typing import Any, Callable, Deque, Dict, Generic, Hashable, Tuple, TypeVar, cast
__all__ = [
"SimpleCache",
"FastDictCache",
"memoized",
]
_T = TypeVar("_T", bound=Hashable)
_U = TypeVar("_U")


class SimpleCache(Generic[_T, _U]):
    """
    Very small FIFO cache: once `maxsize` entries are stored, the oldest
    entry is discarded to make room for a new one.

    :param maxsize: Maximum size of the cache. (Don't make it too big.)
    """

    def __init__(self, maxsize: int = 8) -> None:
        assert maxsize > 0

        self._data: Dict[_T, _U] = {}
        self._keys: Deque[_T] = deque()
        self.maxsize: int = maxsize

    def get(self, key: _T, getter_func: Callable[[], _U]) -> _U:
        """
        Get object from the cache.
        If not found, call `getter_func` to resolve it, and put that on the top
        of the cache instead.
        """
        # Cached? Return it right away.
        if key in self._data:
            return self._data[key]

        # Cache miss: compute, store, then evict the oldest entry if needed.
        value = getter_func()
        self._data[key] = value
        self._keys.append(key)

        if len(self._data) > self.maxsize:
            oldest = self._keys.popleft()
            self._data.pop(oldest, None)

        return value

    def clear(self) -> None:
        " Clear cache. "
        self._data, self._keys = {}, deque()
_K = TypeVar("_K", bound=Tuple)
_V = TypeVar("_V")


class FastDictCache(Dict[_K, _V]):
    """
    Fast, lightweight cache which keeps at most `size` items.
    It will discard the oldest items in the cache first.
    The cache is a dictionary, which doesn't keep track of access counts.
    It is perfect to cache little immutable objects which are not expensive to
    create, but where a dictionary lookup is still much faster than an object
    instantiation.

    :param get_value: Callable that's called in case of a missing key.
    :param size: Maximum number of cached entries.
    """

    # NOTE: This cache is used to cache `prompt_toolkit.layout.screen.Char` and
    #       `prompt_toolkit.Document`. Make sure to keep this really lightweight.
    #       Accessing the cache should stay faster than instantiating new
    #       objects.
    #       (Dictionary lookups are really fast.)
    #       SimpleCache is still required for cases where the cache key is not
    #       the same as the arguments given to the function that creates the
    #       value.)
    def __init__(self, get_value: Callable[..., _V], size: int = 1000000) -> None:
        assert size > 0

        self._keys: Deque[_K] = deque()
        self.get_value = get_value
        self.size = size

    def __missing__(self, key: _K) -> _V:
        # Evict the oldest key before inserting, so the cache never exceeds
        # `size` entries. (Bug fix: the original compared with `>`, which
        # allowed the dict to grow to `size + 1` items, contradicting the
        # documented "keeps at most `size` items".)
        if len(self) >= self.size:
            key_to_remove = self._keys.popleft()
            if key_to_remove in self:
                del self[key_to_remove]

        # The key tuple doubles as the argument list for `get_value`.
        result = self.get_value(*key)
        self[key] = result
        self._keys.append(key)
        return result
_F = TypeVar("_F", bound=Callable)


def memoized(maxsize: int = 1024) -> Callable[[_F], _F]:
    """
    Memoization decorator for immutable classes and pure functions.

    :param maxsize: Maximum number of results to remember (FIFO eviction).
    """

    def decorator(obj: _F) -> _F:
        cache: SimpleCache[Hashable, Any] = SimpleCache(maxsize=maxsize)

        @wraps(obj)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Keyword arguments are sorted so that the key is independent of
            # the order in which they were passed.
            key = (args, tuple(sorted(kwargs.items())))
            return cache.get(key, lambda: obj(*args, **kwargs))

        return cast(_F, wrapper)

    return decorator

View file

@ -0,0 +1,15 @@
from .base import Clipboard, ClipboardData, DummyClipboard, DynamicClipboard
from .in_memory import InMemoryClipboard
# We are not importing `PyperclipClipboard` here, because it would require the
# `pyperclip` module to be present.
# from .pyperclip import PyperclipClipboard
__all__ = [
"Clipboard",
"ClipboardData",
"DummyClipboard",
"DynamicClipboard",
"InMemoryClipboard",
]

View file

@ -0,0 +1,107 @@
"""
Clipboard for command line interface.
"""
from abc import ABCMeta, abstractmethod
from typing import Callable, Optional
from prompt_toolkit.selection import SelectionType
__all__ = [
"Clipboard",
"ClipboardData",
"DummyClipboard",
"DynamicClipboard",
]
class ClipboardData:
    """
    Text on the clipboard.
    :param text: string
    :param type: :class:`~prompt_toolkit.selection.SelectionType`, i.e.
        whether the text was copied character-, line- or block-wise.
    """
    def __init__(
        self, text: str = "", type: SelectionType = SelectionType.CHARACTERS
    ) -> None:
        # The clipboard text itself.
        self.text = text
        # The selection type the text originated from.
        self.type = type
class Clipboard(metaclass=ABCMeta):
    """
    Abstract baseclass for clipboards.
    (An implementation can be in memory, it can share the X11 or Windows
    keyboard, or can be persistent.)
    Subclasses must implement `set_data` and `get_data`; `set_text` and
    `rotate` have default implementations.
    """
    @abstractmethod
    def set_data(self, data: ClipboardData) -> None:
        """
        Set data to the clipboard.
        :param data: :class:`~.ClipboardData` instance.
        """
    def set_text(self, text: str) -> None:  # Not abstract.
        """
        Shortcut for setting plain text on clipboard.
        """
        self.set_data(ClipboardData(text))
    def rotate(self) -> None:
        """
        For Emacs mode, rotate the kill ring.
        (No-op by default; ring-based clipboards override this.)
        """
    @abstractmethod
    def get_data(self) -> ClipboardData:
        """
        Return clipboard data.
        """
class DummyClipboard(Clipboard):
    """
    Clipboard implementation that doesn't remember anything.
    Writes are silently discarded; reads return empty `ClipboardData`.
    """
    def set_data(self, data: ClipboardData) -> None:
        pass
    def set_text(self, text: str) -> None:
        pass
    def rotate(self) -> None:
        pass
    def get_data(self) -> ClipboardData:
        return ClipboardData()
class DynamicClipboard(Clipboard):
    """
    Clipboard class that can dynamically return any Clipboard.
    The backend is resolved on every operation, so it may change over time.
    :param get_clipboard: Callable that returns a :class:`.Clipboard`
        instance, or `None` (in which case a `DummyClipboard` is used).
    """
    def __init__(self, get_clipboard: Callable[[], Optional[Clipboard]]) -> None:
        self.get_clipboard = get_clipboard
    def _clipboard(self) -> Clipboard:
        # Resolve the active clipboard; fall back to a no-op implementation.
        return self.get_clipboard() or DummyClipboard()
    def set_data(self, data: ClipboardData) -> None:
        self._clipboard().set_data(data)
    def set_text(self, text: str) -> None:
        self._clipboard().set_text(text)
    def rotate(self) -> None:
        self._clipboard().rotate()
    def get_data(self) -> ClipboardData:
        return self._clipboard().get_data()

View file

@ -0,0 +1,46 @@
from collections import deque
from typing import Deque, Optional
from .base import Clipboard, ClipboardData
__all__ = [
"InMemoryClipboard",
]
class InMemoryClipboard(Clipboard):
    """
    Default in-memory clipboard implementation.

    Keeps a ring of recently copied items (a kill-ring, as used by Emacs
    mode), entirely in memory.

    :param data: Optional initial clipboard contents.
    :param max_size: Maximum number of items kept in the ring.
    """

    def __init__(
        self, data: Optional[ClipboardData] = None, max_size: int = 60
    ) -> None:
        assert max_size >= 1

        self.max_size = max_size
        self._ring: Deque[ClipboardData] = deque()

        if data is not None:
            self.set_data(data)

    def set_data(self, data: ClipboardData) -> None:
        # The newest item goes to the front; trim the oldest from the back.
        self._ring.appendleft(data)
        while len(self._ring) > self.max_size:
            self._ring.pop()

    def get_data(self) -> ClipboardData:
        # The front of the ring is the current clipboard content.
        return self._ring[0] if self._ring else ClipboardData()

    def rotate(self) -> None:
        if self._ring:
            # Move the most recent item to the back of the ring.
            self._ring.append(self._ring.popleft())

View file

@ -0,0 +1,42 @@
from typing import Optional
import pyperclip
from prompt_toolkit.selection import SelectionType
from .base import Clipboard, ClipboardData
__all__ = [
"PyperclipClipboard",
]
class PyperclipClipboard(Clipboard):
    """
    Clipboard that synchronizes with the Windows/Mac/Linux system clipboard,
    using the pyperclip module.
    """

    def __init__(self) -> None:
        # Remember the last thing we copied ourselves, so that the
        # `SelectionType` can be restored when the system clipboard still
        # holds that exact text.
        self._data: Optional[ClipboardData] = None

    def set_data(self, data: ClipboardData) -> None:
        """Remember ``data`` and push its text to the system clipboard."""
        self._data = data
        pyperclip.copy(data.text)

    def get_data(self) -> ClipboardData:
        """Return the current system clipboard contents as `ClipboardData`."""
        text = pyperclip.paste()

        # When the clipboard data is equal to what we copied last time, reuse
        # the `ClipboardData` instance. That way we're sure to keep the same
        # `SelectionType`.
        if self._data and self._data.text == text:
            return self._data

        # Pyperclip returned something else. Create a new `ClipboardData`
        # instance. (Bug fix: the original used `SelectionType.LINES` in both
        # branches of the conditional; text without a newline must be a
        # CHARACTERS selection.)
        else:
            return ClipboardData(
                text=text,
                type=SelectionType.LINES
                if "\n" in text
                else SelectionType.CHARACTERS,
            )

View file

@ -0,0 +1,36 @@
from .base import (
CompleteEvent,
Completer,
Completion,
DummyCompleter,
DynamicCompleter,
ThreadedCompleter,
get_common_complete_suffix,
merge_completers,
)
from .filesystem import ExecutableCompleter, PathCompleter
from .fuzzy_completer import FuzzyCompleter, FuzzyWordCompleter
from .nested import NestedCompleter
from .word_completer import WordCompleter
__all__ = [
# Base.
"Completion",
"Completer",
"ThreadedCompleter",
"DummyCompleter",
"DynamicCompleter",
"CompleteEvent",
"merge_completers",
"get_common_complete_suffix",
# Filesystem.
"PathCompleter",
"ExecutableCompleter",
# Fuzzy
"FuzzyCompleter",
"FuzzyWordCompleter",
# Nested.
"NestedCompleter",
# Word completer.
"WordCompleter",
]

View file

@ -0,0 +1,349 @@
"""
"""
from abc import ABCMeta, abstractmethod
from typing import AsyncGenerator, Callable, Iterable, Optional, Sequence
from prompt_toolkit.document import Document
from prompt_toolkit.eventloop import generator_to_async_generator
from prompt_toolkit.formatted_text import AnyFormattedText, StyleAndTextTuples
__all__ = [
"Completion",
"Completer",
"ThreadedCompleter",
"DummyCompleter",
"DynamicCompleter",
"CompleteEvent",
"merge_completers",
"get_common_complete_suffix",
]
class Completion:
    """
    One completion choice, as produced by a :class:`.Completer`.

    :param text: The new string that will be inserted into the document.
    :param start_position: Position relative to the cursor_position where the
        new text will start. The text will be inserted between the
        start_position and the original cursor position.
    :param display: (optional string or formatted text) If the completion has
        to be displayed differently in the completion menu.
    :param display_meta: (Optional string or formatted text) Meta information
        about the completion, e.g. the path or source where it's coming from.
        This can also be a callable that returns a string.
    :param style: Style string.
    :param selected_style: Style string, used for a selected completion.
        This can override the `style` parameter.
    """

    def __init__(
        self,
        text: str,
        start_position: int = 0,
        display: Optional[AnyFormattedText] = None,
        display_meta: Optional[AnyFormattedText] = None,
        style: str = "",
        selected_style: str = "",
    ) -> None:
        from prompt_toolkit.formatted_text import to_formatted_text

        self.text = text
        self.start_position = start_position

        # Kept unresolved: `display_meta` may be a callable and is only
        # converted lazily, via the `display_meta` property.
        self._display_meta = display_meta

        # The menu text defaults to the inserted text itself.
        self.display = to_formatted_text(text if display is None else display)

        self.style = style
        self.selected_style = selected_style

        # Completions insert at, or overwrite text before, the cursor.
        assert self.start_position <= 0

    def __repr__(self) -> str:
        if isinstance(self.display, str) and self.display == self.text:
            return "%s(text=%r, start_position=%r)" % (
                self.__class__.__name__,
                self.text,
                self.start_position,
            )
        return "%s(text=%r, start_position=%r, display=%r)" % (
            self.__class__.__name__,
            self.text,
            self.start_position,
            self.display,
        )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Completion):
            return False
        return (
            self.text,
            self.start_position,
            self.display,
            self._display_meta,
        ) == (
            other.text,
            other.start_position,
            other.display,
            other._display_meta,
        )

    def __hash__(self) -> int:
        return hash((self.text, self.start_position, self.display, self._display_meta))

    @property
    def display_text(self) -> str:
        " The 'display' field as plain text. "
        from prompt_toolkit.formatted_text import fragment_list_to_text

        return fragment_list_to_text(self.display)

    @property
    def display_meta(self) -> StyleAndTextTuples:
        " Return meta-text. (This is lazy when using a callable). "
        from prompt_toolkit.formatted_text import to_formatted_text

        return to_formatted_text(self._display_meta or "")

    @property
    def display_meta_text(self) -> str:
        " The 'meta' field as plain text. "
        from prompt_toolkit.formatted_text import fragment_list_to_text

        return fragment_list_to_text(self.display_meta)

    def new_completion_from_position(self, position: int) -> "Completion":
        """
        (Only for internal use!)
        Get a new completion by splitting this one. Used by `Application` when
        it needs to have a list of new completions after inserting the common
        prefix.
        """
        assert position - self.start_position >= 0

        return Completion(
            text=self.text[position - self.start_position :],
            display=self.display,
            display_meta=self._display_meta,
        )
class CompleteEvent:
    """
    Event that called the completer.

    :param text_inserted: When True, it means that completions are requested
        because of a text insert. (`Buffer.complete_while_typing`.)
    :param completion_requested: When True, it means that the user explicitly
        pressed the `Tab` key in order to view the completions.

    These two flags can be used for instance to implement a completer that
    shows some completions when ``Tab`` has been pressed, but not
    automatically when the user presses a space. (Because of
    `complete_while_typing`.)
    """

    def __init__(
        self, text_inserted: bool = False, completion_requested: bool = False
    ) -> None:
        # At most one of the two triggers can apply.
        assert not (text_inserted and completion_requested)

        #: Automatic completion while typing.
        self.text_inserted = text_inserted

        #: Used explicitly requested completion by pressing 'tab'.
        self.completion_requested = completion_requested

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}"
            f"(text_inserted={self.text_inserted!r},"
            f" completion_requested={self.completion_requested!r})"
        )
class Completer(metaclass=ABCMeta):
    """
    Base class for completer implementations.
    """
    @abstractmethod
    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        """
        This should be a generator that yields :class:`.Completion` instances.
        If the generation of completions is something expensive (that takes a
        lot of time), consider wrapping this `Completer` class in a
        `ThreadedCompleter`. In that case, the completer algorithm runs in a
        background thread and completions will be displayed as soon as they
        arrive.
        :param document: :class:`~prompt_toolkit.document.Document` instance.
        :param complete_event: :class:`.CompleteEvent` instance.
        """
        # The unreachable `while False: yield` turns this default body into
        # a generator function, so calling it always returns an (empty)
        # iterable rather than `None`.
        while False:
            yield
    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Asynchronous generator for completions. (Probably, you won't have to
        override this.)
        Asynchronous generator of :class:`.Completion` objects.
        """
        # Default: adapt the synchronous generator. Note that this blocks
        # the event loop while each completion is being produced; see
        # `ThreadedCompleter` for a non-blocking variant.
        for item in self.get_completions(document, complete_event):
            yield item
class ThreadedCompleter(Completer):
    """
    Wrapper that runs the wrapped completer's `get_completions` generator in
    a background thread.

    (Use this to prevent the user interface from becoming unresponsive if the
    generation of completions takes too much time.)

    Completions are displayed as soon as they are produced; the user can
    already select one before the full list is available.
    """

    def __init__(self, completer: Completer) -> None:
        self.completer = completer

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Synchronous path: delegate directly.
        return self.completer.get_completions(document, complete_event)

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        """
        Yield completions from the background thread, as they arrive.
        """
        gen = generator_to_async_generator(
            lambda: self.completer.get_completions(document, complete_event)
        )
        async for completion in gen:
            yield completion

    def __repr__(self) -> str:
        return f"ThreadedCompleter({self.completer!r})"
class DummyCompleter(Completer):
    """
    A completer that doesn't return any completion.
    """
    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Always empty; callers can still iterate the result safely.
        return []
    def __repr__(self) -> str:
        return "DummyCompleter()"
class DynamicCompleter(Completer):
    """
    Completer that delegates to whatever completer ``get_completer`` returns
    at call time (or to a `DummyCompleter` when it returns `None`).

    :param get_completer: Callable that returns a :class:`.Completer` instance.
    """

    def __init__(self, get_completer: Callable[[], Optional[Completer]]) -> None:
        self.get_completer = get_completer

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        return (self.get_completer() or DummyCompleter()).get_completions(
            document, complete_event
        )

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        delegate = self.get_completer() or DummyCompleter()
        async for completion in delegate.get_completions_async(
            document, complete_event
        ):
            yield completion

    def __repr__(self) -> str:
        return f"DynamicCompleter({self.get_completer!r} -> {self.get_completer()!r})"
class _MergedCompleter(Completer):
    """
    Completer that chains the completions of several completers, in order.
    """

    def __init__(self, completers: Sequence[Completer]) -> None:
        self.completers = completers

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Chain all completions, blocking.
        for completer in self.completers:
            yield from completer.get_completions(document, complete_event)

    async def get_completions_async(
        self, document: Document, complete_event: CompleteEvent
    ) -> AsyncGenerator[Completion, None]:
        # Chain all completions, asynchronously.
        for completer in self.completers:
            async for item in completer.get_completions_async(
                document, complete_event
            ):
                yield item
def merge_completers(completers: Sequence[Completer]) -> _MergedCompleter:
    """
    Combine several completers into one.

    :param completers: Sequence of completers to query, in order.
    :return: A single completer that chains the completions of all of them.
    """
    return _MergedCompleter(completers)
def get_common_complete_suffix(
    document: Document, completions: Sequence[Completion]
) -> str:
    """
    Return the common prefix for all completions.

    Returns the empty string as soon as any completion would rewrite the
    text that is already before the cursor.
    """

    def doesnt_change_before_cursor(completion: Completion) -> bool:
        # The overlap between the completion text and the existing input
        # must be exactly what the user already typed.
        end = completion.text[: -completion.start_position]
        return document.text_before_cursor.endswith(end)

    # Keep only the completions that leave the text before the cursor intact.
    unchanged = [c for c in completions if doesnt_change_before_cursor(c)]

    # When at least one completion changes the text before the cursor, there
    # is no common part that can be inserted safely.
    if len(unchanged) != len(completions):
        return ""

    def get_suffix(completion: Completion) -> str:
        # The part of the completion that would actually be inserted.
        return completion.text[-completion.start_position :]

    return _commonprefix([get_suffix(c) for c in unchanged])
def _commonprefix(strings: Iterable[str]) -> str:
# Similar to os.path.commonprefix
if not strings:
return ""
else:
s1 = min(strings)
s2 = max(strings)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1

View file

@ -0,0 +1,113 @@
import os
from typing import Callable, Iterable, List, Optional
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document
__all__ = [
"PathCompleter",
"ExecutableCompleter",
]
class PathCompleter(Completer):
    """
    Complete for Path variables.

    :param get_paths: Callable which returns a list of directories to look into
                      when the user enters a relative path.
    :param file_filter: Callable which takes a filename and returns whether
                        this file should show up in the completion. ``None``
                        when no filtering has to be done.
    :param min_input_len: Don't do autocompletion when the input string is shorter.
    """

    def __init__(
        self,
        only_directories: bool = False,
        get_paths: Optional[Callable[[], List[str]]] = None,
        file_filter: Optional[Callable[[str], bool]] = None,
        min_input_len: int = 0,
        expanduser: bool = False,
    ) -> None:
        self.only_directories = only_directories
        self.get_paths = get_paths or (lambda: ["."])
        self.file_filter = file_filter or (lambda _: True)
        self.min_input_len = min_input_len
        self.expanduser = expanduser

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        text = document.text_before_cursor

        # Skip completion for very short inputs; otherwise we could get far
        # too many results and completion would become too heavy.
        if len(text) < self.min_input_len:
            return

        try:
            # Do tilde expansion when requested.
            if self.expanduser:
                text = os.path.expanduser(text)

            # Determine the directories in which to search.
            dirname = os.path.dirname(text)
            if dirname:
                search_dirs = [
                    os.path.dirname(os.path.join(p, text)) for p in self.get_paths()
                ]
            else:
                search_dirs = self.get_paths()

            # The start of the file name being completed.
            prefix = os.path.basename(text)

            # Collect all (directory, filename) pairs that match the prefix.
            matches = []
            for directory in search_dirs:
                if not os.path.isdir(directory):
                    continue
                for filename in os.listdir(directory):
                    if filename.startswith(prefix):
                        matches.append((directory, filename))

            # Sort by file name and yield the completions.
            for directory, filename in sorted(matches, key=lambda pair: pair[1]):
                completion = filename[len(prefix) :]
                full_name = os.path.join(directory, filename)

                if os.path.isdir(full_name):
                    # For directories, add a slash to the display name only.
                    # (We don't add it to `completion`; users can type it
                    # themselves to trigger completion of the next level.)
                    filename += "/"
                elif self.only_directories:
                    continue

                if not self.file_filter(full_name):
                    continue

                yield Completion(completion, 0, display=filename)
        except OSError:
            # Unreadable/vanished directories simply produce no completions.
            pass
class ExecutableCompleter(PathCompleter):
    """
    Complete only executable files in the current path.

    A :class:`PathCompleter` configured to search every directory on the
    ``PATH`` environment variable and to show only entries for which the
    current user has execute permission.
    """

    def __init__(self) -> None:
        # (The original call carried a stray trailing comma, which built and
        # discarded a one-element tuple on every instantiation.)
        super().__init__(
            only_directories=False,
            min_input_len=1,
            get_paths=lambda: os.environ.get("PATH", "").split(os.pathsep),
            file_filter=lambda name: os.access(name, os.X_OK),
            expanduser=True,
        )

View file

@ -0,0 +1,199 @@
import re
from typing import Callable, Dict, Iterable, List, NamedTuple, Optional, Tuple, Union
from prompt_toolkit.document import Document
from prompt_toolkit.filters import FilterOrBool, to_filter
from prompt_toolkit.formatted_text import AnyFormattedText, StyleAndTextTuples
from .base import CompleteEvent, Completer, Completion
from .word_completer import WordCompleter
__all__ = [
"FuzzyCompleter",
"FuzzyWordCompleter",
]
class FuzzyCompleter(Completer):
    """
    Fuzzy completion.
    This wraps any other completer and turns it into a fuzzy completer.

    If the list of words is: ["leopard" , "gorilla", "dinosaur", "cat", "bee"]
    Then trying to complete "oar" would yield "leopard" and "dinosaur", but not
    the others, because they match the regular expression 'o.*a.*r'.
    Similar, in another application "djm" could expand to "django_migrations".

    The results are sorted by relevance, which is defined as the start position
    and the length of the match.

    Notice that this is not really a tool to work around spelling mistakes,
    like what would be possible with difflib. The purpose is rather to have a
    quicker or more intuitive way to filter the given completions, especially
    when many completions have a common prefix.

    Fuzzy algorithm is based on this post:
    https://blog.amjith.com/fuzzyfinder-in-10-lines-of-python

    :param completer: A :class:`~.Completer` instance.
    :param WORD: When True, use WORD characters.
    :param pattern: Regex pattern which selects the characters before the
        cursor that are considered for the fuzzy matching.
    :param enable_fuzzy: (bool or `Filter`) Enabled the fuzzy behavior. For
        easily turning fuzzyness on or off according to a certain condition.
    """

    def __init__(
        self,
        completer: Completer,
        WORD: bool = False,
        pattern: Optional[str] = None,
        enable_fuzzy: FilterOrBool = True,
    ) -> None:
        # Patterns must be anchored: the fuzzy matcher only looks at the
        # characters directly before the cursor.
        assert pattern is None or pattern.startswith("^")

        self.completer = completer
        # (The original code assigned `self.pattern` twice; once is enough.)
        self.pattern = pattern
        self.WORD = WORD
        self.enable_fuzzy = to_filter(enable_fuzzy)

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        if self.enable_fuzzy():
            return self._get_fuzzy_completions(document, complete_event)
        else:
            # Fuzzy behavior disabled: fall through to the wrapped completer.
            return self.completer.get_completions(document, complete_event)

    def _get_pattern(self) -> str:
        # Regex selecting the text (before the cursor) used for fuzzy matching.
        if self.pattern:
            return self.pattern
        if self.WORD:
            return r"[^\s]+"
        return "^[a-zA-Z0-9_]*"

    def _get_fuzzy_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        word_before_cursor = document.get_word_before_cursor(
            pattern=re.compile(self._get_pattern())
        )

        # Get completions for the document *without* the fuzzy word, so the
        # wrapped completer produces the full candidate list.
        document2 = Document(
            text=document.text[: document.cursor_position - len(word_before_cursor)],
            cursor_position=document.cursor_position - len(word_before_cursor),
        )

        completions = list(self.completer.get_completions(document2, complete_event))

        fuzzy_matches: List[_FuzzyMatch] = []

        pat = ".*?".join(map(re.escape, word_before_cursor))
        pat = "(?=({0}))".format(pat)  # lookahead regex to manage overlapping matches
        regex = re.compile(pat, re.IGNORECASE)
        for compl in completions:
            matches = list(regex.finditer(compl.text))
            if matches:
                # Prefer the match, closest to the left, then shortest.
                best = min(matches, key=lambda m: (m.start(), len(m.group(1))))
                fuzzy_matches.append(
                    _FuzzyMatch(len(best.group(1)), best.start(), compl)
                )

        def sort_key(fuzzy_match: "_FuzzyMatch") -> Tuple[int, int]:
            " Sort by start position, then by the length of the match. "
            return fuzzy_match.start_pos, fuzzy_match.match_length

        fuzzy_matches = sorted(fuzzy_matches, key=sort_key)

        for match in fuzzy_matches:
            # Include these completions, but set the correct `display`
            # attribute and `start_position`.
            yield Completion(
                match.completion.text,
                start_position=match.completion.start_position
                - len(word_before_cursor),
                display_meta=match.completion.display_meta,
                display=self._get_display(match, word_before_cursor),
                style=match.completion.style,
            )

    def _get_display(
        self, fuzzy_match: "_FuzzyMatch", word_before_cursor: str
    ) -> AnyFormattedText:
        """
        Generate formatted text for the display label.

        The matched region is highlighted, and within it, the characters the
        user actually typed get an extra style class.
        """
        m = fuzzy_match
        word = m.completion.text

        if m.match_length == 0:
            # No highlighting when we have zero length matches (no input text).
            return word

        result: StyleAndTextTuples = []

        # Text before match.
        result.append(("class:fuzzymatch.outside", word[: m.start_pos]))

        # The match itself.
        characters = list(word_before_cursor)

        for c in word[m.start_pos : m.start_pos + m.match_length]:
            classname = "class:fuzzymatch.inside"
            if characters and c.lower() == characters[0].lower():
                classname += ".character"
                del characters[0]

            result.append((classname, c))

        # Text after match.
        result.append(
            ("class:fuzzymatch.outside", word[m.start_pos + m.match_length :])
        )

        return result
class FuzzyWordCompleter(Completer):
    """
    Fuzzy completion on a list of words.

    (This is basically a `WordCompleter` wrapped in a `FuzzyCompleter`.)

    :param words: List of words or callable that returns a list of words.
    :param meta_dict: Optional dict mapping words to their meta-information.
    :param WORD: When True, use WORD characters.
    """

    def __init__(
        self,
        words: Union[List[str], Callable[[], List[str]]],
        meta_dict: Optional[Dict[str, str]] = None,
        WORD: bool = False,
    ) -> None:
        self.words = words
        self.meta_dict = meta_dict or {}
        self.WORD = WORD

        # The plain word completer doing the actual candidate generation ...
        self.word_completer = WordCompleter(
            words=self.words, WORD=self.WORD, meta_dict=self.meta_dict
        )

        # ... wrapped so that candidates are filtered/ranked fuzzily.
        self.fuzzy_completer = FuzzyCompleter(self.word_completer, WORD=self.WORD)

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Delegate entirely to the fuzzy wrapper.
        return self.fuzzy_completer.get_completions(document, complete_event)
class _FuzzyMatch(NamedTuple):
    """One fuzzy match: length and start of the matched region, plus the
    completion it belongs to."""

    match_length: int
    start_pos: int
    completion: Completion

View file

@ -0,0 +1,109 @@
"""
Nestedcompleter for completion of hierarchical data structures.
"""
from typing import Any, Dict, Iterable, Mapping, Optional, Set, Union
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.completion.word_completer import WordCompleter
from prompt_toolkit.document import Document
__all__ = ["NestedCompleter"]
# NestedDict = Mapping[str, Union['NestedDict', Set[str], None, Completer]]
NestedDict = Mapping[str, Union[Any, Set[str], None, Completer]]
class NestedCompleter(Completer):
    """
    Completer which wraps around several other completers, and calls the one
    that corresponds with the first word of the input.

    By combining multiple `NestedCompleter` instances, we can achieve multiple
    hierarchical levels of autocompletion. This is useful when `WordCompleter`
    is not sufficient.

    If you need multiple levels, check out the `from_nested_dict` classmethod.
    """

    def __init__(
        self, options: Dict[str, Optional[Completer]], ignore_case: bool = True
    ) -> None:
        # Maps a first word to the completer for the remainder of the input.
        # A value of `None` means there is no further completion after it.
        self.options = options
        self.ignore_case = ignore_case

    def __repr__(self) -> str:
        return "NestedCompleter(%r, ignore_case=%r)" % (self.options, self.ignore_case)

    @classmethod
    def from_nested_dict(cls, data: NestedDict) -> "NestedCompleter":
        """
        Create a `NestedCompleter`, starting from a nested dictionary data
        structure, like this:

        .. code::

            data = {
                'show': {
                    'version': None,
                    'interfaces': None,
                    'clock': None,
                    'ip': {'interface': {'brief'}}
                },
                'exit': None,
                'enable': None
            }

        The value should be `None` if there is no further completion at some
        point. If all values in the dictionary are None, it is also possible to
        use a set instead.

        Values in this data structure can be completers as well.
        """
        options: Dict[str, Optional[Completer]] = {}
        for key, value in data.items():
            if isinstance(value, Completer):
                # A ready-made completer: use it as-is for this subcommand.
                options[key] = value
            elif isinstance(value, dict):
                # A nested dict: recurse into another `NestedCompleter`.
                options[key] = cls.from_nested_dict(value)
            elif isinstance(value, set):
                # A set is shorthand for a dict whose values are all `None`.
                options[key] = cls.from_nested_dict({item: None for item in value})
            else:
                assert value is None
                options[key] = None

        return cls(options)

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Split document.
        text = document.text_before_cursor.lstrip()
        # Number of characters stripped from the left; needed below to keep
        # the cursor position of the sub-document consistent.
        stripped_len = len(document.text_before_cursor) - len(text)

        # If there is a space, check for the first term, and use a
        # subcompleter.
        if " " in text:
            first_term = text.split()[0]
            completer = self.options.get(first_term)

            # If we have a sub completer, use this for the completions.
            if completer is not None:
                remaining_text = text[len(first_term) :].lstrip()
                # Offset of `remaining_text` within the original input.
                move_cursor = len(text) - len(remaining_text) + stripped_len

                new_document = Document(
                    remaining_text,
                    cursor_position=document.cursor_position - move_cursor,
                )

                for c in completer.get_completions(new_document, complete_event):
                    yield c

        # No space in the input: behave exactly like `WordCompleter`.
        else:
            completer = WordCompleter(
                list(self.options.keys()), ignore_case=self.ignore_case
            )
            for c in completer.get_completions(document, complete_event):
                yield c

View file

@ -0,0 +1,84 @@
from typing import Callable, Dict, Iterable, List, Optional, Pattern, Union
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document
__all__ = [
"WordCompleter",
]
class WordCompleter(Completer):
    """
    Simple autocompletion on a list of words.

    :param words: List of words or callable that returns a list of words.
    :param ignore_case: If True, case-insensitive completion.
    :param meta_dict: Optional dict mapping words to their meta-text. (This
        should map strings to strings or formatted text.)
    :param WORD: When True, use WORD characters.
    :param sentence: When True, don't complete by comparing the word before the
        cursor, but by comparing all the text before the cursor. In this case,
        the list of words is just a list of strings, where each string can
        contain spaces. (Can not be used together with the WORD option.)
    :param match_middle: When True, match not only the start, but also in the
        middle of the word.
    :param pattern: Optional compiled regex for finding the word before
        the cursor to complete. When given, use this regex pattern instead of
        default one (see document._FIND_WORD_RE)
    """

    def __init__(
        self,
        words: Union[List[str], Callable[[], List[str]]],
        ignore_case: bool = False,
        meta_dict: Optional[Dict[str, str]] = None,
        WORD: bool = False,
        sentence: bool = False,
        match_middle: bool = False,
        pattern: Optional[Pattern[str]] = None,
    ) -> None:
        # `sentence` and `WORD` are mutually exclusive completion modes.
        assert not (WORD and sentence)

        self.words = words
        self.ignore_case = ignore_case
        self.meta_dict = meta_dict or {}
        self.WORD = WORD
        self.sentence = sentence
        self.match_middle = match_middle
        self.pattern = pattern

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Resolve the word list (it may be provided lazily as a callable).
        word_list = self.words
        if callable(word_list):
            word_list = word_list()

        # Text that the candidates are compared against: either the whole
        # input (sentence mode) or just the word before the cursor.
        if self.sentence:
            prefix = document.text_before_cursor
        else:
            prefix = document.get_word_before_cursor(
                WORD=self.WORD, pattern=self.pattern
            )
        if self.ignore_case:
            prefix = prefix.lower()

        for word in word_list:
            candidate = word.lower() if self.ignore_case else word

            if self.match_middle:
                matched = prefix in candidate
            else:
                matched = candidate.startswith(prefix)

            if matched:
                yield Completion(
                    word,
                    -len(prefix),
                    display_meta=self.meta_dict.get(word, ""),
                )

View file

@ -0,0 +1,3 @@
from .system import SystemCompleter
__all__ = ["SystemCompleter"]

View file

@ -0,0 +1,62 @@
from prompt_toolkit.completion.filesystem import ExecutableCompleter, PathCompleter
from prompt_toolkit.contrib.regular_languages.compiler import compile
from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter
__all__ = [
"SystemCompleter",
]
class SystemCompleter(GrammarCompleter):
    """
    Completer for system commands.

    Builds a grammar that recognizes an executable name, followed by
    (optionally quoted) arguments, and attaches an
    :class:`~prompt_toolkit.completion.filesystem.ExecutableCompleter` /
    :class:`~prompt_toolkit.completion.filesystem.PathCompleter` to the
    corresponding grammar variables.
    """

    def __init__(self) -> None:
        # Compile grammar.
        g = compile(
            r"""
                # First we have an executable.
                (?P<executable>[^\s]+)

                # Ignore literals in between.
                (
                    \s+
                    ("[^"]*" | '[^']*' | [^'"]+ )
                )*

                \s+

                # Filename as parameters.
                (
                    (?P<filename>[^\s]+) |
                    "(?P<double_quoted_filename>[^\s]+)" |
                    '(?P<single_quoted_filename>[^\s]+)'
                )
            """,
            # Escape/unescape functions keep quoted filenames round-trippable
            # when the completion is inserted back into the quoted context.
            escape_funcs={
                "double_quoted_filename": (lambda string: string.replace('"', '\\"')),
                "single_quoted_filename": (lambda string: string.replace("'", "\\'")),
            },
            unescape_funcs={
                "double_quoted_filename": (
                    lambda string: string.replace('\\"', '"')
                ),  # XXX: not entirely correct.
                "single_quoted_filename": (lambda string: string.replace("\\'", "'")),
            },
        )

        # Create GrammarCompleter
        super().__init__(
            g,
            {
                "executable": ExecutableCompleter(),
                "filename": PathCompleter(only_directories=False, expanduser=True),
                "double_quoted_filename": PathCompleter(
                    only_directories=False, expanduser=True
                ),
                "single_quoted_filename": PathCompleter(
                    only_directories=False, expanduser=True
                ),
            },
        )

View file

@ -0,0 +1,77 @@
r"""
Tool for expressing the grammar of an input as a regular language.
==================================================================
The grammar for the input of many simple command line interfaces can be
expressed by a regular language. Examples are PDB (the Python debugger); a
simple (bash-like) shell with "pwd", "cd", "cat" and "ls" commands; arguments
that you can pass to an executable; etc. It is possible to use regular
expressions for validation and parsing of such a grammar. (More about regular
languages: http://en.wikipedia.org/wiki/Regular_language)
Example
-------
Let's take the pwd/cd/cat/ls example. We want to have a shell that accepts
these three commands. "cd" is followed by a quoted directory name and "cat" is
followed by a quoted file name. (We allow quotes inside the filename when
they're escaped with a backslash.) We could define the grammar using the
following regular expression::
grammar = \s* (
pwd |
ls |
(cd \s+ " ([^"]|\.)+ ") |
(cat \s+ " ([^"]|\.)+ ")
) \s*
What can we do with this grammar?
---------------------------------
- Syntax highlighting: We could use this for instance to give file names
different colour.
- Parse the result: .. We can extract the file names and commands by using a
regular expression with named groups.
- Input validation: .. Don't accept anything that does not match this grammar.
When combined with a parser, we can also recursively do
filename validation (and accept only existing files.)
- Autocompletion: .... Each part of the grammar can have its own autocompleter.
"cat" has to be completed using file names, while "cd"
has to be completed using directory names.
How does it work?
-----------------
As a user of this library, you have to define the grammar of the input as a
regular expression. The parts of this grammar where autocompletion, validation
or any other processing is required need to be marked using a regex named
group. Like ``(?P<varname>...)`` for instance.
When the input is processed for validation (for instance), the regex will
execute, the named group is captured, and the validator associated with this
named group will test the captured string.
There is one tricky bit:
Often we operate on incomplete input (this is by definition the case for
autocompletion) and we have to decide for the cursor position in which
possible state the grammar it could be and in which way variables could be
matched up to that point.
To solve this problem, the compiler takes the original regular expression and
translates it into a set of other regular expressions which each match certain
prefixes of the original regular expression. We generate one prefix regular
expression for every named variable (with this variable being the end of that
expression).
TODO: some examples of:
- How to create a highlighter from this grammar.
- How to create a validator from this grammar.
- How to create an autocompleter from this grammar.
- How to create a parser from this grammar.
"""
from .compiler import compile
__all__ = ["compile"]

View file

@ -0,0 +1,573 @@
r"""
Compiler for a regular grammar.
Example usage::
# Create and compile grammar.
p = compile('add \s+ (?P<var1>[^\s]+) \s+ (?P<var2>[^\s]+)')
# Match input string.
m = p.match('add 23 432')
# Get variables.
m.variables().get('var1') # Returns "23"
m.variables().get('var2') # Returns "432"
Partial matches are possible::
# Create and compile grammar.
p = compile('''
# Operators with two arguments.
((?P<operator1>[^\s]+) \s+ (?P<var1>[^\s]+) \s+ (?P<var2>[^\s]+)) |
# Operators with only one arguments.
((?P<operator2>[^\s]+) \s+ (?P<var1>[^\s]+))
''')
# Match partial input string.
m = p.match_prefix('add 23')
# Get variables. (Notice that both operator1 and operator2 contain the
# value "add".) This is because our input is incomplete, and we don't know
# yet in which rule of the regex we we'll end up. It could also be that
# `operator1` and `operator2` have a different autocompleter and we want to
# call all possible autocompleters that would result in valid input.)
m.variables().get('var1') # Returns "23"
m.variables().get('operator1') # Returns "add"
m.variables().get('operator2') # Returns "add"
"""
import re
from typing import Callable, Dict, Iterable, Iterator, List
from typing import Match as RegexMatch
from typing import Optional, Pattern, Tuple, cast
from .regex_parser import (
AnyNode,
Lookahead,
Node,
NodeSequence,
Regex,
Repeat,
Variable,
parse_regex,
tokenize_regex,
)
__all__ = [
"compile",
]
# Name of the named group in the regex, matching trailing input.
# (Trailing input is when the input contains characters after the end of the
# expression has been matched.)
_INVALID_TRAILING_INPUT = "invalid_trailing"
EscapeFuncDict = Dict[str, Callable[[str], str]]
class _CompiledGrammar:
    """
    Compiles a grammar. This will take the parse tree of a regular expression
    and compile the grammar.

    :param root_node: :class~`.regex_parser.Node` instance.
    :param escape_funcs: `dict` mapping variable names to escape callables.
    :param unescape_funcs: `dict` mapping variable names to unescape callables.
    """

    def __init__(
        self,
        root_node: Node,
        escape_funcs: Optional[EscapeFuncDict] = None,
        unescape_funcs: Optional[EscapeFuncDict] = None,
    ) -> None:
        self.root_node = root_node
        self.escape_funcs = escape_funcs or {}
        self.unescape_funcs = unescape_funcs or {}

        #: Dictionary that will map the regex names to Node instances.
        self._group_names_to_nodes: Dict[
            str, str
        ] = {}  # Maps regex group names to varnames.
        counter = [0]

        def create_group_func(node: Variable) -> str:
            # Generate a unique regex group name ("n0", "n1", ...) for every
            # `Variable` node and remember which variable it refers to.
            name = "n%s" % counter[0]
            self._group_names_to_nodes[name] = node.varname
            counter[0] += 1
            return name

        # Compile regex strings.
        self._re_pattern = "^%s$" % self._transform(root_node, create_group_func)
        self._re_prefix_patterns = list(
            self._transform_prefix(root_node, create_group_func)
        )

        # Compile the regex itself.
        flags = re.DOTALL  # Note that we don't need re.MULTILINE! (^ and $
        # still represent the start and end of input text.)
        self._re = re.compile(self._re_pattern, flags)
        self._re_prefix = [re.compile(t, flags) for t in self._re_prefix_patterns]

        # We compile one more set of regexes, similar to `_re_prefix`, but
        # accept any trailing input. This will ensure that we can still
        # highlight the input correctly, even when the input contains some
        # additional characters at the end that don't match the grammar.
        self._re_prefix_with_trailing_input = [
            re.compile(
                r"(?:%s)(?P<%s>.*?)$" % (t.rstrip("$"), _INVALID_TRAILING_INPUT), flags
            )
            for t in self._re_prefix_patterns
        ]

    def escape(self, varname: str, value: str) -> str:
        """
        Escape `value` to fit in the place of this variable into the grammar.
        """
        f = self.escape_funcs.get(varname)
        return f(value) if f else value

    def unescape(self, varname: str, value: str) -> str:
        """
        Unescape `value`.
        """
        f = self.unescape_funcs.get(varname)
        return f(value) if f else value

    @classmethod
    def _transform(
        cls, root_node: Node, create_group_func: Callable[[Variable], str]
    ) -> str:
        """
        Turn a :class:`Node` object into a regular expression.

        :param root_node: The :class:`Node` instance for which we generate the grammar.
        :param create_group_func: A callable which takes a `Node` and returns the next
            free name for this node.
        """

        def transform(node: Node) -> str:
            # Turn `AnyNode` into an OR.
            if isinstance(node, AnyNode):
                return "(?:%s)" % "|".join(transform(c) for c in node.children)

            # Concatenate a `NodeSequence`
            elif isinstance(node, NodeSequence):
                return "".join(transform(c) for c in node.children)

            # For Regex and Lookahead nodes, just insert them literally.
            elif isinstance(node, Regex):
                return node.regex

            elif isinstance(node, Lookahead):
                # BUGFIX: positive lookahead is "(?=...)" in regex syntax.
                # The previous "(=" opened a plain group matching a literal
                # "=" character.
                before = "(?!" if node.negative else "(?="
                return before + transform(node.childnode) + ")"

            # A `Variable` wraps the children into a named group.
            elif isinstance(node, Variable):
                return "(?P<%s>%s)" % (
                    create_group_func(node),
                    transform(node.childnode),
                )

            # `Repeat`.
            elif isinstance(node, Repeat):
                if node.max_repeat is None:
                    if node.min_repeat == 0:
                        repeat_sign = "*"
                    elif node.min_repeat == 1:
                        repeat_sign = "+"
                    else:
                        # BUGFIX: e.g. {2,} previously left `repeat_sign`
                        # unbound here, raising UnboundLocalError.
                        repeat_sign = "{%i,}" % node.min_repeat
                else:
                    repeat_sign = "{%i,%s}" % (
                        node.min_repeat,
                        str(node.max_repeat),
                    )

                return "(?:%s)%s%s" % (
                    transform(node.childnode),
                    repeat_sign,
                    ("" if node.greedy else "?"),
                )
            else:
                raise TypeError("Got %r" % (node,))

        return transform(root_node)

    @classmethod
    def _transform_prefix(
        cls, root_node: Node, create_group_func: Callable[[Variable], str]
    ) -> Iterable[str]:
        """
        Yield all the regular expressions matching a prefix of the grammar
        defined by the `Node` instance.

        For each `Variable`, one regex pattern will be generated, with this
        named group at the end. This is required because a regex engine will
        terminate once a match is found. For autocompletion however, we need
        the matches for all possible paths, so that we can provide completions
        for each `Variable`.

        - So, in the case of an `Any` (`A|B|C)', we generate a pattern for each
          clause. This is one for `A`, one for `B` and one for `C`. Unless some
          groups don't contain a `Variable`, then these can be merged together.
        - In the case of a `NodeSequence` (`ABC`), we generate a pattern for
          each prefix that ends with a variable, and one pattern for the whole
          sequence. So, that's one for `A`, one for `AB` and one for `ABC`.

        :param root_node: The :class:`Node` instance for which we generate the grammar.
        :param create_group_func: A callable which takes a `Node` and returns the next
            free name for this node.
        """

        def contains_variable(node: Node) -> bool:
            # True when this subtree contains at least one `Variable` node.
            if isinstance(node, Regex):
                return False
            elif isinstance(node, Variable):
                return True
            elif isinstance(node, (Lookahead, Repeat)):
                return contains_variable(node.childnode)
            elif isinstance(node, (NodeSequence, AnyNode)):
                return any(contains_variable(child) for child in node.children)

            return False

        def transform(node: Node) -> Iterable[str]:
            # Generate separate pattern for all terms that contain variables
            # within this OR. Terms that don't contain a variable can be merged
            # together in one pattern.
            if isinstance(node, AnyNode):
                # If we have a definition like:
                #           (?P<name> .*) | (?P<city> .*)
                # Then we want to be able to generate completions for both the
                # name as well as the city. We do this by yielding two
                # different regular expressions, because the engine won't
                # follow multiple paths, if multiple are possible.
                children_with_variable = []
                children_without_variable = []

                for c in node.children:
                    if contains_variable(c):
                        children_with_variable.append(c)
                    else:
                        children_without_variable.append(c)

                for c in children_with_variable:
                    yield from transform(c)

                # Merge options without variable together.
                if children_without_variable:
                    yield "|".join(
                        r for c in children_without_variable for r in transform(c)
                    )

            # For a sequence, generate a pattern for each prefix that ends with
            # a variable + one pattern of the complete sequence.
            # (This is because, for autocompletion, we match the text before
            # the cursor, and completions are given for the variable that we
            # match right before the cursor.)
            elif isinstance(node, NodeSequence):
                # For all components in the sequence, compute prefix patterns,
                # as well as full patterns.
                complete = [cls._transform(c, create_group_func) for c in node.children]
                prefixes = [list(transform(c)) for c in node.children]
                variable_nodes = [contains_variable(c) for c in node.children]

                # If any child contains a variable, we should yield a
                # pattern up to that point, so that we are sure this will be
                # matched.
                for i in range(len(node.children)):
                    if variable_nodes[i]:
                        for c_str in prefixes[i]:
                            yield "".join(complete[:i]) + c_str

                # If there are non-variable nodes, merge all the prefixes into
                # one pattern. If the input is: "[part1] [part2] [part3]", then
                # this gets compiled into:
                #  (complete1 + (complete2 + (complete3 | partial3) | partial2) | partial1 )
                # For nodes that contain a variable, we skip the "|partial"
                # part here, because these are matched with the previous
                # patterns.
                if not all(variable_nodes):
                    result = []

                    # Start with complete patterns.
                    for i in range(len(node.children)):
                        result.append("(?:")
                        result.append(complete[i])

                    # Add prefix patterns.
                    for i in range(len(node.children) - 1, -1, -1):
                        if variable_nodes[i]:
                            # No need to yield a prefix for this one, we did
                            # the variable prefixes earlier.
                            result.append(")")
                        else:
                            result.append("|(?:")
                            # If this yields multiple, we should yield all combinations.
                            assert len(prefixes[i]) == 1
                            result.append(prefixes[i][0])
                            result.append("))")

                    yield "".join(result)

            elif isinstance(node, Regex):
                yield "(?:%s)?" % node.regex

            elif isinstance(node, Lookahead):
                if node.negative:
                    yield "(?!%s)" % cls._transform(node.childnode, create_group_func)
                else:
                    # Not sure what the correct semantics are in this case.
                    # (Probably it's not worth implementing this.)
                    raise Exception("Positive lookahead not yet supported.")

            elif isinstance(node, Variable):
                # (Note that we should not append a '?' here. the 'transform'
                # method will already recursively do that.)
                for c_str in transform(node.childnode):
                    yield "(?P<%s>%s)" % (create_group_func(node), c_str)

            elif isinstance(node, Repeat):
                # If we have a repetition of 8 times. That would mean that the
                # current input could have for instance 7 times a complete
                # match, followed by a partial match.
                prefix = cls._transform(node.childnode, create_group_func)

                if node.max_repeat == 1:
                    yield from transform(node.childnode)
                else:
                    for c_str in transform(node.childnode):
                        if node.max_repeat:
                            repeat_sign = "{,%i}" % (node.max_repeat - 1)
                        else:
                            repeat_sign = "*"
                        yield "(?:%s)%s%s%s" % (
                            prefix,
                            repeat_sign,
                            ("" if node.greedy else "?"),
                            c_str,
                        )

            else:
                raise TypeError("Got %r" % node)

        for r in transform(root_node):
            yield "^(?:%s)$" % r

    def match(self, string: str) -> Optional["Match"]:
        """
        Match the string with the grammar.
        Returns a :class:`Match` instance or `None` when the input doesn't match the grammar.

        :param string: The input string.
        """
        m = self._re.match(string)

        if m:
            return Match(
                string, [(self._re, m)], self._group_names_to_nodes, self.unescape_funcs
            )
        return None

    def match_prefix(self, string: str) -> Optional["Match"]:
        """
        Do a partial match of the string with the grammar. The returned
        :class:`Match` instance can contain multiple representations of the
        match. This will never return `None`. If it doesn't match at all, the
        "trailing input" part will capture all of the input.

        :param string: The input string.
        """
        # First try to match using `_re_prefix`. If nothing is found, use the
        # patterns that also accept trailing characters.
        for patterns in [self._re_prefix, self._re_prefix_with_trailing_input]:
            matches = [(r, r.match(string)) for r in patterns]
            matches2 = [(r, m) for r, m in matches if m]

            if matches2 != []:
                return Match(
                    string, matches2, self._group_names_to_nodes, self.unescape_funcs
                )

        return None
class Match:
    """
    The result of matching an input string against a compiled grammar.

    :param string: The input string.
    :param re_matches: List of (compiled_re_pattern, re_match) tuples.
    :param group_names_to_nodes: Dictionary mapping all the re group names to
        the variable names of the grammar nodes they were generated for.
    :param unescape_funcs: Dictionary mapping variable names to functions that
        unescape the matched text for that variable.
    """
    def __init__(
        self,
        string: str,
        re_matches: List[Tuple[Pattern[str], RegexMatch[str]]],
        group_names_to_nodes: Dict[str, str],
        unescape_funcs: Dict[str, Callable[[str], str]],
    ):
        self.string = string
        self._re_matches = re_matches
        self._group_names_to_nodes = group_names_to_nodes
        self._unescape_funcs = unescape_funcs
    def _nodes_to_regs(self) -> List[Tuple[str, Tuple[int, int]]]:
        """
        Return a list of (varname, reg) tuples.

        `reg` is a (start, stop) position pair; (-1, -1) means the group did
        not participate in the match.
        """
        def get_tuples() -> Iterable[Tuple[str, Tuple[int, int]]]:
            for r, re_match in self._re_matches:
                for group_name, group_index in r.groupindex.items():
                    # Skip the special group that captures invalid trailing input.
                    if group_name != _INVALID_TRAILING_INPUT:
                        regs = cast(Tuple[Tuple[int, int], ...], re_match.regs)
                        reg = regs[group_index]
                        node = self._group_names_to_nodes[group_name]
                        yield (node, reg)
        return list(get_tuples())
    def _nodes_to_values(self) -> List[Tuple[str, str, Tuple[int, int]]]:
        """
        Returns list of (varname, string_value, reg) tuples, for the groups
        that actually participated in the match.
        """
        def is_none(sl: Tuple[int, int]) -> bool:
            # (-1, -1) marks a regex group that did not participate.
            return sl[0] == -1 and sl[1] == -1
        def get(sl: Tuple[int, int]) -> str:
            return self.string[sl[0] : sl[1]]
        return [
            (varname, get(slice), slice)
            for varname, slice in self._nodes_to_regs()
            if not is_none(slice)
        ]
    def _unescape(self, varname: str, value: str) -> str:
        # Apply the unescape function for this variable, if one was registered.
        unwrapper = self._unescape_funcs.get(varname)
        return unwrapper(value) if unwrapper else value
    def variables(self) -> "Variables":
        """
        Returns :class:`Variables` instance.
        """
        return Variables(
            [(k, self._unescape(k, v), sl) for k, v, sl in self._nodes_to_values()]
        )
    def trailing_input(self) -> Optional["MatchVariable"]:
        """
        Get the `MatchVariable` instance, representing trailing input, if there is any.
        "Trailing input" is input at the end that does not match the grammar anymore, but
        when this is removed from the end of the input, the input would be a valid string.
        """
        slices: List[Tuple[int, int]] = []
        # Find all regex group for the name _INVALID_TRAILING_INPUT.
        for r, re_match in self._re_matches:
            for group_name, group_index in r.groupindex.items():
                if group_name == _INVALID_TRAILING_INPUT:
                    slices.append(re_match.regs[group_index])
        # Take the smallest part. (Smaller trailing text means that a larger input has
        # been matched, so that is better.)
        if slices:
            # The largest start position corresponds to the smallest trailing part.
            slice = (max(i[0] for i in slices), max(i[1] for i in slices))
            value = self.string[slice[0] : slice[1]]
            return MatchVariable("<trailing_input>", value, slice)
        return None
    def end_nodes(self) -> Iterable["MatchVariable"]:
        """
        Yields `MatchVariable` instances for all the nodes having their end
        position at the end of the input string.
        """
        for varname, reg in self._nodes_to_regs():
            # If this part goes until the end of the input string.
            if reg[1] == len(self.string):
                value = self._unescape(varname, self.string[reg[0] : reg[1]])
                yield MatchVariable(varname, value, (reg[0], reg[1]))
class Variables:
    """
    Container for the variables captured by a :class:`Match`.

    Wraps a list of ``(varname, value, slice)`` tuples and offers dict-like
    read access. A single variable name may occur multiple times.
    """

    def __init__(self, tuples: List[Tuple[str, str, Tuple[int, int]]]) -> None:
        #: List of (varname, value, slice) tuples.
        self._tuples = tuples

    def __repr__(self) -> str:
        pairs = ", ".join("%s=%r" % (name, value) for name, value, _ in self._tuples)
        return "%s(%s)" % (self.__class__.__name__, pairs)

    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        """Return the first value captured for `key`, or `default` when absent."""
        for name, value, _ in self._tuples:
            if name == key:
                return value
        return default

    def getall(self, key: str) -> List[str]:
        """Return every value captured for `key` (possibly an empty list)."""
        return [value for name, value, _ in self._tuples if name == key]

    def __getitem__(self, key: str) -> Optional[str]:
        return self.get(key)

    def __iter__(self) -> Iterator["MatchVariable"]:
        """
        Yield `MatchVariable` instances.
        """
        for name, value, sl in self._tuples:
            yield MatchVariable(name, value, sl)
class MatchVariable:
    """
    Represents a match of a variable in the grammar.
    :param varname: (string) Name of the variable.
    :param value: (string) Value of this variable.
    :param slice: (start, stop) tuple, indicating the position of this variable
        in the input string.
    """

    def __init__(self, varname: str, value: str, slice: Tuple[int, int]) -> None:
        self.varname = varname
        self.value = value
        self.slice = slice
        # Expose the slice boundaries as separate attributes for convenience.
        self.start, self.stop = slice

    def __repr__(self) -> str:
        return "{}({!r}, {!r})".format(self.__class__.__name__, self.varname, self.value)
def compile(
    expression: str,
    escape_funcs: Optional[EscapeFuncDict] = None,
    unescape_funcs: Optional[EscapeFuncDict] = None,
) -> _CompiledGrammar:
    """
    Compile grammar (given as regex string), returning a `CompiledGrammar`
    instance.

    :param expression: The grammar, written in the regex-like mini language.
    :param escape_funcs: Optional dict mapping variable names to functions
        that escape a value before it is inserted into the input string.
    :param unescape_funcs: Optional dict mapping variable names to functions
        that reverse that escaping again.

    NOTE: this deliberately shadows the `compile` builtin; it is the public
    entry point of this module.
    """
    # Tokenize and parse the expression first, then hand the parse tree to the
    # compiler.
    return _compile_from_parse_tree(
        parse_regex(tokenize_regex(expression)),
        escape_funcs=escape_funcs,
        unescape_funcs=unescape_funcs,
    )
def _compile_from_parse_tree(
    root_node: Node,
    escape_funcs: Optional[EscapeFuncDict] = None,
    unescape_funcs: Optional[EscapeFuncDict] = None,
) -> _CompiledGrammar:
    """
    Compile grammar (given as parse tree), returning a `CompiledGrammar`
    instance.

    :param root_node: Root :class:`Node` of the parse tree (as produced by
        `parse_regex`).
    """
    return _CompiledGrammar(
        root_node, escape_funcs=escape_funcs, unescape_funcs=unescape_funcs
    )

View file

@ -0,0 +1,94 @@
"""
Completer for a regular grammar.
"""
from typing import Dict, Iterable, List
from prompt_toolkit.completion import CompleteEvent, Completer, Completion
from prompt_toolkit.document import Document
from .compiler import Match, _CompiledGrammar
__all__ = [
"GrammarCompleter",
]
class GrammarCompleter(Completer):
    """
    Completer which can be used for autocompletion according to variables in
    the grammar. Each variable can have a different autocompleter.
    :param compiled_grammar: `_CompiledGrammar` instance (the result of the
        `compile()` function).
    :param completers: `dict` mapping variable names of the grammar to the
        `Completer` instances to be used for each variable.
    """
    def __init__(
        self, compiled_grammar: _CompiledGrammar, completers: Dict[str, Completer]
    ) -> None:
        self.compiled_grammar = compiled_grammar
        self.completers = completers
    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Match the text before the cursor against the grammar (partial match
        # allowed), then complete the variables that end at the cursor.
        m = self.compiled_grammar.match_prefix(document.text_before_cursor)
        if m:
            completions = self._remove_duplicates(
                self._get_completions_for_match(m, complete_event)
            )
            for c in completions:
                yield c
    def _get_completions_for_match(
        self, match: Match, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        """
        Yield all the possible completions for this input string.
        (The completer assumes that the cursor position was at the end of the
        input string.)
        """
        for match_variable in match.end_nodes():
            varname = match_variable.varname
            start = match_variable.start
            completer = self.completers.get(varname)
            if completer:
                text = match_variable.value
                # Unwrap text.
                unwrapped_text = self.compiled_grammar.unescape(varname, text)
                # Create a document, for the completions API (text/cursor_position)
                document = Document(unwrapped_text, len(unwrapped_text))
                # Call completer
                for completion in completer.get_completions(document, complete_event):
                    new_text = (
                        unwrapped_text[: len(text) + completion.start_position]
                        + completion.text
                    )
                    # Wrap again.
                    yield Completion(
                        text=self.compiled_grammar.escape(varname, new_text),
                        start_position=start - len(match.string),
                        display=completion.display,
                        display_meta=completion.display_meta,
                    )
    def _remove_duplicates(self, items: Iterable[Completion]) -> List[Completion]:
        """
        Remove duplicates, while keeping the order.
        (Sometimes we have duplicates, because there are several matches of the
        same grammar, each yielding similar completions.)
        """
        # O(n^2) membership scan; acceptable because completion lists are short.
        result: List[Completion] = []
        for i in items:
            if i not in result:
                result.append(i)
        return result

View file

@ -0,0 +1,92 @@
"""
`GrammarLexer` is compatible with other lexers and can be used to highlight
the input using a regular grammar with annotations.
"""
from typing import Callable, Dict, Optional
from prompt_toolkit.document import Document
from prompt_toolkit.formatted_text.base import StyleAndTextTuples
from prompt_toolkit.formatted_text.utils import split_lines
from prompt_toolkit.lexers import Lexer
from .compiler import _CompiledGrammar
__all__ = [
"GrammarLexer",
]
class GrammarLexer(Lexer):
    """
    Lexer which can be used for highlighting of fragments according to variables in the grammar.
    (It does not do actual lexing of the string, but it exposes an API, compatible
    with the Pygments lexer class.)
    :param compiled_grammar: Grammar as returned by the `compile()` function.
    :param default_style: Style string applied to every character that no
        variable lexer restyles.
    :param lexers: Dictionary mapping variable names of the regular grammar to
        the lexers that should be used for this part. (This can
        call other lexers recursively.) If you wish a part of the
        grammar to just get one fragment, use a
        `prompt_toolkit.lexers.SimpleLexer`.
    """
    def __init__(
        self,
        compiled_grammar: _CompiledGrammar,
        default_style: str = "",
        lexers: Optional[Dict[str, Lexer]] = None,
    ) -> None:
        self.compiled_grammar = compiled_grammar
        self.default_style = default_style
        self.lexers = lexers or {}
    def _get_text_fragments(self, text: str) -> StyleAndTextTuples:
        m = self.compiled_grammar.match_prefix(text)
        if m:
            # Start with one (style, char) tuple per input character, then let
            # each variable's lexer overwrite the styles in its slice.
            characters: StyleAndTextTuples = [(self.default_style, c) for c in text]
            for v in m.variables():
                # If we have a `Lexer` instance for this part of the input.
                # Tokenize recursively and apply tokens.
                lexer = self.lexers.get(v.varname)
                if lexer:
                    document = Document(text[v.start : v.stop])
                    lexer_tokens_for_line = lexer.lex_document(document)
                    text_fragments: StyleAndTextTuples = []
                    for i in range(len(document.lines)):
                        text_fragments.extend(lexer_tokens_for_line(i))
                        text_fragments.append(("", "\n"))
                    if text_fragments:
                        # Drop the trailing newline added by the loop above.
                        text_fragments.pop()
                    # Copy the sub-lexer's styles character by character into
                    # this variable's slice, but never overwrite a character
                    # that was already restyled by an earlier variable.
                    i = v.start
                    for t, s, *_ in text_fragments:
                        for c in s:
                            if characters[i][0] == self.default_style:
                                characters[i] = (t, characters[i][1])
                            i += 1
            # Highlight trailing input.
            trailing_input = m.trailing_input()
            if trailing_input:
                for i in range(trailing_input.start, trailing_input.stop):
                    characters[i] = ("class:trailing-input", characters[i][1])
            return characters
        else:
            return [("", text)]
    def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
        lines = list(split_lines(self._get_text_fragments(document.text)))
        def get_line(lineno: int) -> StyleAndTextTuples:
            try:
                return lines[lineno]
            except IndexError:
                return []
        return get_line

View file

@ -0,0 +1,281 @@
"""
Parser for parsing a regular expression.
Take a string representing a regular expression and return the root node of its
parse tree.
usage::
root_node = parse_regex('(hello|world)')
Remarks:
- The regex parser processes multiline, it ignores all whitespace and supports
multiple named groups with the same name and #-style comments.
Limitations:
- Lookahead is not supported.
"""
import re
from typing import List, Optional
__all__ = [
"Repeat",
"Variable",
"Regex",
"Lookahead",
"tokenize_regex",
"parse_regex",
]
class Node:
    """
    Base class for all the grammar nodes.
    (You don't initialize this one.)
    """
    def __add__(self, other_node: "Node") -> "NodeSequence":
        # `a + b`: grammar `a`, immediately followed by grammar `b`.
        return NodeSequence([self, other_node])
    def __or__(self, other_node: "Node") -> "AnyNode":
        # `a | b`: either grammar `a` or grammar `b`.
        return AnyNode([self, other_node])
class AnyNode(Node):
    """
    Union operation (OR operation) between several grammars. You don't
    initialize this yourself, but it's a result of a "Grammar1 | Grammar2"
    operation.
    """

    def __init__(self, children: List[Node]) -> None:
        self.children = children

    def __or__(self, other_node: Node) -> "AnyNode":
        # Keep the union flat: extend this node's alternatives rather than
        # nesting another AnyNode.
        return AnyNode([*self.children, other_node])

    def __repr__(self) -> str:
        return "{}({!r})".format(self.__class__.__name__, self.children)
class NodeSequence(Node):
    """
    Concatenation operation of several grammars. You don't initialize this
    yourself, but it's a result of a "Grammar1 + Grammar2" operation.
    """

    def __init__(self, children: List[Node]) -> None:
        self.children = children

    def __add__(self, other_node: Node) -> "NodeSequence":
        # Keep the sequence flat: append to this node's children rather than
        # nesting another NodeSequence.
        combined = self.children + [other_node]
        return NodeSequence(combined)

    def __repr__(self) -> str:
        return "{}({!r})".format(self.__class__.__name__, self.children)
class Regex(Node):
    """
    Regular expression.
    """

    def __init__(self, regex: str) -> None:
        # Compiling validates the pattern early; the compiled object itself is
        # discarded and only the source string is kept.
        re.compile(regex)
        self.regex = regex

    def __repr__(self) -> str:
        return "{}(/{}/)".format(type(self).__name__, self.regex)
class Lookahead(Node):
    """
    Lookahead expression.
    """

    def __init__(self, childnode: Node, negative: bool = False) -> None:
        self.childnode = childnode
        # When True, this represents a negative lookahead ("(?!...)").
        self.negative = negative

    def __repr__(self) -> str:
        return "{}({!r})".format(self.__class__.__name__, self.childnode)
class Variable(Node):
    """
    Mark a variable in the regular grammar. This will be translated into a
    named group. Each variable can have its own completer, validator, etc..
    :param childnode: The grammar which is wrapped inside this variable.
    :param varname: String.
    """

    def __init__(self, childnode: Node, varname: str = "") -> None:
        self.childnode = childnode
        self.varname = varname

    def __repr__(self) -> str:
        return "{}(childnode={!r}, varname={!r})".format(
            self.__class__.__name__, self.childnode, self.varname
        )
class Repeat(Node):
    """
    Repetition of a grammar node.

    :param childnode: The grammar node that is repeated.
    :param min_repeat: Minimum number of repetitions.
    :param max_repeat: Maximum number of repetitions, or `None` for unbounded.
    :param greedy: When False, this is a non-greedy repetition.
    """

    def __init__(
        self,
        childnode: Node,
        min_repeat: int = 0,
        max_repeat: Optional[int] = None,
        greedy: bool = True,
    ) -> None:
        self.childnode = childnode
        self.min_repeat = min_repeat
        self.max_repeat = max_repeat
        self.greedy = greedy

    def __repr__(self) -> str:
        return "{}(childnode={!r})".format(self.__class__.__name__, self.childnode)
def tokenize_regex(input: str) -> List[str]:
    """
    Takes a string, representing a regular expression as input, and tokenizes
    it.
    :param input: string, representing a regular expression.
    :returns: List of tokens.
    """
    # Regular expression for tokenizing other regular expressions.
    token_pattern = re.compile(
        r"""^(
        \(\?P\<[a-zA-Z0-9_-]+\> | # Start of named group.
        \(\?#[^)]*\) | # Comment
        \(\?= | # Start of lookahead assertion
        \(\?! | # Start of negative lookahead assertion
        \(\?<= | # If preceded by.
        \(\?< | # If not preceded by.
        \(?: | # Start of group. (non capturing.)
        \( | # Start of group.
        \(?[iLmsux] | # Flags.
        \(?P=[a-zA-Z]+\) | # Back reference to named group
        \) | # End of group.
        \{[^{}]*\} | # Repetition
        \*\? | \+\? | \?\?\ | # Non greedy repetition.
        \* | \+ | \? | # Repetition
        \#.*\n | # Comment
        \\. |
        # Character group.
        \[
        ( [^\]\\] | \\.)*
        \] |
        [^(){}] |
        .
        )""",
        re.VERBOSE,
    )
    tokens: List[str] = []
    remaining = input
    # Repeatedly peel the next token off the front of the string.
    while remaining:
        m = token_pattern.match(remaining)
        if m is None:
            raise Exception("Could not tokenize input regex.")
        end = m.end()
        token = remaining[:end]
        remaining = remaining[end:]
        # Whitespace between tokens is not significant in this grammar language.
        if not token.isspace():
            tokens.append(token)
    return tokens
def parse_regex(regex_tokens: List[str]) -> Node:
    """
    Takes a list of tokens from the tokenizer, and returns a parse tree.

    :param regex_tokens: Tokens as produced by `tokenize_regex`.
    :returns: The root :class:`Node` of the parse tree.
    :raises Exception: On unbalanced parentheses or unsupported constructs.
    """
    # We add a closing brace because that represents the final pop of the stack.
    # The token list is reversed so that `tokens.pop()` takes tokens from the
    # front of the original input.
    tokens: List[str] = [")"] + regex_tokens[::-1]
    def wrap(lst: List[Node]) -> Node:
        """ Turn list into sequence when it contains several items. """
        if len(lst) == 1:
            return lst[0]
        else:
            return NodeSequence(lst)
    def _parse() -> Node:
        # Parse one parenthesized group (or the top level), consuming tokens
        # up to and including the matching ")".
        or_list: List[List[Node]] = []
        result: List[Node] = []
        def wrapped_result() -> Node:
            # Collapse the accumulated alternatives (if any) into a single node.
            if or_list == []:
                return wrap(result)
            else:
                or_list.append(result)
                return AnyNode([wrap(i) for i in or_list])
        while tokens:
            t = tokens.pop()
            if t.startswith("(?P<"):
                # Named group becomes a grammar Variable; strip "(?P<" and ">".
                variable = Variable(_parse(), varname=t[4:-1])
                result.append(variable)
            elif t in ("*", "*?"):
                greedy = t == "*"
                result[-1] = Repeat(result[-1], greedy=greedy)
            elif t in ("+", "+?"):
                greedy = t == "+"
                result[-1] = Repeat(result[-1], min_repeat=1, greedy=greedy)
            elif t in ("?", "??"):
                if result == []:
                    raise Exception("Nothing to repeat." + repr(tokens))
                else:
                    greedy = t == "?"
                    result[-1] = Repeat(
                        result[-1], min_repeat=0, max_repeat=1, greedy=greedy
                    )
            elif t == "|":
                # Start a new alternative; the current one is stashed in or_list.
                or_list.append(result)
                result = []
            elif t in ("(", "(?:"):
                result.append(_parse())
            elif t == "(?!":
                result.append(Lookahead(_parse(), negative=True))
            elif t == "(?=":
                result.append(Lookahead(_parse(), negative=False))
            elif t == ")":
                return wrapped_result()
            elif t.startswith("#"):
                # Comments are discarded.
                pass
            elif t.startswith("{"):
                # TODO: implement!
                raise Exception("{}-style repetition not yet supported".format(t))
            elif t.startswith("(?"):
                raise Exception("%r not supported" % t)
            elif t.isspace():
                pass
            else:
                # Any other single token is a literal regex fragment.
                result.append(Regex(t))
        raise Exception("Expecting ')' token")
    result = _parse()
    if len(tokens) != 0:
        raise Exception("Unmatched parentheses.")
    else:
        return result

View file

@ -0,0 +1,60 @@
"""
Validator for a regular language.
"""
from typing import Dict
from prompt_toolkit.document import Document
from prompt_toolkit.validation import ValidationError, Validator
from .compiler import _CompiledGrammar
__all__ = [
"GrammarValidator",
]
class GrammarValidator(Validator):
    """
    Validator which can be used for validation according to variables in
    the grammar. Each variable can have its own validator.
    :param compiled_grammar: `_CompiledGrammar` instance (the result of the
        `compile()` function).
    :param validators: `dict` mapping variable names of the grammar to the
        `Validator` instances to be used for each variable.
    """
    def __init__(
        self, compiled_grammar: _CompiledGrammar, validators: Dict[str, Validator]
    ) -> None:
        self.compiled_grammar = compiled_grammar
        self.validators = validators
    def validate(self, document: Document) -> None:
        """
        Raise `ValidationError` when the input doesn't match the grammar, or
        when any per-variable validator rejects its value.
        """
        # Parse input document.
        # We use `match`, not `match_prefix`, because for validation, we want
        # the actual, unambiguous interpretation of the input.
        m = self.compiled_grammar.match(document.text)
        if m:
            for v in m.variables():
                validator = self.validators.get(v.varname)
                if validator:
                    # Unescape text.
                    unwrapped_text = self.compiled_grammar.unescape(v.varname, v.value)
                    # Create a document, for the completions API (text/cursor_position)
                    inner_document = Document(unwrapped_text, len(unwrapped_text))
                    try:
                        validator.validate(inner_document)
                    except ValidationError as e:
                        # Translate the cursor position in the unescaped inner
                        # text back to a position in the original input.
                        raise ValidationError(
                            cursor_position=v.start + e.cursor_position,
                            message=e.message,
                        ) from e
        else:
            raise ValidationError(
                cursor_position=len(document.text), message="Invalid command"
            )

View file

@ -0,0 +1,6 @@
from .server import PromptToolkitSSHServer, PromptToolkitSSHSession
__all__ = [
"PromptToolkitSSHSession",
"PromptToolkitSSHServer",
]

View file

@ -0,0 +1,151 @@
"""
Utility for running a prompt_toolkit application in an asyncssh server.
"""
import asyncio
import traceback
from typing import Awaitable, Callable, Optional, TextIO, cast
import asyncssh
from prompt_toolkit.application.current import AppSession, create_app_session
from prompt_toolkit.data_structures import Size
from prompt_toolkit.input import create_pipe_input
from prompt_toolkit.output.vt100 import Vt100_Output
__all__ = ["PromptToolkitSSHSession", "PromptToolkitSSHServer"]
class PromptToolkitSSHSession(asyncssh.SSHServerSession):
    """
    One SSH session, running a prompt_toolkit application inside its own
    `AppSession`.

    :param interact: Async callable, invoked (with this session as argument)
        once the SSH session has started.
    """
    def __init__(
        self, interact: Callable[["PromptToolkitSSHSession"], Awaitable[None]]
    ) -> None:
        self.interact = interact
        self.interact_task: Optional[asyncio.Task[None]] = None
        self._chan = None
        self.app_session: Optional[AppSession] = None
        # PipInput object, for sending input in the CLI.
        # (This is something that we can use in the prompt_toolkit event loop,
        # but still write data into manually.)
        self._input = create_pipe_input()
        self._output = None
        # Output object. Don't render to the real stdout, but write everything
        # in the SSH channel.
        # (NOTE: the inner class uses `s` instead of `self` so that `self`
        # still refers to the enclosing session inside these methods.)
        class Stdout:
            def write(s, data):
                try:
                    if self._chan is not None:
                        self._chan.write(data.replace("\n", "\r\n"))
                except BrokenPipeError:
                    pass  # Channel not open for sending.
            def isatty(s) -> bool:
                return True
            def flush(s):
                pass
            @property
            def encoding(s):
                return self._chan._orig_chan.get_encoding()[0]
        self.stdout = cast(TextIO, Stdout())
    def _get_size(self) -> Size:
        """
        Callable that returns the current `Size`, required by Vt100_Output.
        """
        if self._chan is None:
            # No channel yet: fall back to a reasonable default size.
            return Size(rows=20, columns=79)
        else:
            width, height, pixwidth, pixheight = self._chan.get_terminal_size()
            return Size(rows=height, columns=width)
    def connection_made(self, chan):
        self._chan = chan
    def shell_requested(self) -> bool:
        # Accept the client's request for an interactive shell.
        return True
    def session_started(self) -> None:
        self.interact_task = asyncio.get_event_loop().create_task(self._interact())
    async def _interact(self) -> None:
        if self._chan is None:
            # Should not happen.
            raise Exception("`_interact` called before `connection_made`.")
        if hasattr(self._chan, "set_line_mode") and self._chan._editor is not None:
            # Disable the line editing provided by asyncssh. Prompt_toolkit
            # provides the line editing.
            self._chan.set_line_mode(False)
        term = self._chan.get_terminal_type()
        self._output = Vt100_Output(
            self.stdout, self._get_size, term=term, write_binary=False
        )
        # Run `interact` inside its own AppSession, so that several SSH
        # sessions can run prompt_toolkit applications concurrently.
        with create_app_session(input=self._input, output=self._output) as session:
            self.app_session = session
            try:
                await self.interact(self)
            except BaseException:
                traceback.print_exc()
            finally:
                # Close the connection.
                self._chan.close()
    def terminal_size_changed(self, width, height, pixwidth, pixheight):
        # Send resize event to the current application.
        if self.app_session and self.app_session.app:
            self.app_session.app._on_resize()
    def data_received(self, data, datatype):
        # Forward keyboard input from the SSH channel into the pipe input.
        self._input.send_text(data)
class PromptToolkitSSHServer(asyncssh.SSHServer):
    """
    Run a prompt_toolkit application over an asyncssh server.
    This takes one argument, an `interact` function, which is called for each
    connection. This should be an asynchronous function that runs the
    prompt_toolkit applications. This function runs in an `AppSession`, which
    means that we can have multiple UI interactions concurrently.
    Example usage:
    .. code:: python
        async def interact(ssh_session: PromptToolkitSSHSession) -> None:
            await yes_no_dialog("my title", "my text").run_async()
            prompt_session = PromptSession()
            text = await prompt_session.prompt_async("Type something: ")
            print_formatted_text('You said: ', text)
        server = PromptToolkitSSHServer(interact=interact)
        loop = get_event_loop()
        loop.run_until_complete(
            asyncssh.create_server(
                lambda: MySSHServer(interact),
                "",
                port,
                server_host_keys=["/etc/ssh/..."],
            )
        )
        loop.run_forever()
    """
    def __init__(
        self, interact: Callable[[PromptToolkitSSHSession], Awaitable[None]]
    ) -> None:
        self.interact = interact
    def begin_auth(self, username):
        # No authentication.
        # (Returning False tells asyncssh that auth is not required for this user.)
        return False
    def session_requested(self) -> PromptToolkitSSHSession:
        # One new session object per connection; each gets its own AppSession.
        return PromptToolkitSSHSession(self.interact)

View file

@ -0,0 +1,5 @@
from .server import TelnetServer
__all__ = [
"TelnetServer",
]

View file

@ -0,0 +1,10 @@
"""
Python logger for the telnet server.
"""
import logging
logger = logging.getLogger(__package__)
__all__ = [
"logger",
]

View file

@ -0,0 +1,207 @@
"""
Parser for the Telnet protocol. (Not a complete implementation of the telnet
specification, but sufficient for a command line interface.)
Inspired by `Twisted.conch.telnet`.
"""
import struct
from typing import Callable, Generator
from .log import logger
__all__ = [
"TelnetProtocolParser",
]
def int2byte(number: int) -> bytes:
    """Convert an integer in range(256) into a single-byte ``bytes`` object."""
    return bytes([number])
# Telnet constants.
# Command and option codes from the telnet specification (RFC 854 and related
# option RFCs). Values are single bytes.
NOP = int2byte(0)
SGA = int2byte(3)
IAC = int2byte(255)  # "Interpret As Command" escape byte.
DO = int2byte(253)
DONT = int2byte(254)
LINEMODE = int2byte(34)
SB = int2byte(250)  # Start of subnegotiation.
WILL = int2byte(251)
WONT = int2byte(252)
MODE = int2byte(1)
SE = int2byte(240)  # End of subnegotiation.
ECHO = int2byte(1)
NAWS = int2byte(31)  # Negotiate About Window Size (see `naws` below).
# NOTE(review): LINEMODE is assigned twice with the same value (34); the
# duplicate is harmless but could be removed.
LINEMODE = int2byte(34)
SUPPRESS_GO_AHEAD = int2byte(3)
TTYPE = int2byte(24)  # Terminal type option (see `ttype` below).
SEND = int2byte(1)
IS = int2byte(0)
DM = int2byte(242)
BRK = int2byte(243)
IP = int2byte(244)
AO = int2byte(245)
AYT = int2byte(246)
EC = int2byte(247)
EL = int2byte(248)
GA = int2byte(249)
class TelnetProtocolParser:
    """
    Parser for the Telnet protocol.
    Usage::
        def data_received(data):
            print(data)
        def size_received(rows, columns):
            print(rows, columns)
        p = TelnetProtocolParser(data_received, size_received)
        p.feed(binary_data)
    """
    def __init__(
        self,
        data_received_callback: Callable[[bytes], None],
        size_received_callback: Callable[[int, int], None],
        ttype_received_callback: Callable[[str], None],
    ) -> None:
        self.data_received_callback = data_received_callback
        self.size_received_callback = size_received_callback
        self.ttype_received_callback = ttype_received_callback
        self._parser = self._parse_coroutine()
        # Advance the generator to its first `yield`, so it is ready to
        # receive bytes.
        self._parser.send(None)  # type: ignore
    def received_data(self, data: bytes) -> None:
        # Plain (non-command) data: hand it to the application callback.
        self.data_received_callback(data)
    def do_received(self, data: bytes) -> None:
        """ Received telnet DO command. """
        logger.info("DO %r", data)
    def dont_received(self, data: bytes) -> None:
        """ Received telnet DONT command. """
        logger.info("DONT %r", data)
    def will_received(self, data: bytes) -> None:
        """ Received telnet WILL command. """
        logger.info("WILL %r", data)
    def wont_received(self, data: bytes) -> None:
        """ Received telnet WONT command. """
        logger.info("WONT %r", data)
    def command_received(self, command: bytes, data: bytes) -> None:
        # Dispatch a single IAC command to the matching handler.
        # (The current handlers only log; negotiation replies are sent by the
        # server during initialization.)
        if command == DO:
            self.do_received(data)
        elif command == DONT:
            self.dont_received(data)
        elif command == WILL:
            self.will_received(data)
        elif command == WONT:
            self.wont_received(data)
        else:
            logger.info("command received %r %r", command, data)
    def naws(self, data: bytes) -> None:
        """
        Received NAWS. (Window dimensions.)
        """
        if len(data) == 4:
            # NOTE: the first parameter of struct.unpack should be
            # a 'str' object. Both on Py2/py3. This crashes on OSX
            # otherwise.
            columns, rows = struct.unpack(str("!HH"), data)
            self.size_received_callback(rows, columns)
        else:
            logger.warning("Wrong number of NAWS bytes")
    def ttype(self, data: bytes) -> None:
        """
        Received terminal type.
        """
        subcmd, data = data[0:1], data[1:]
        if subcmd == IS:
            ttype = data.decode("ascii")
            self.ttype_received_callback(ttype)
        else:
            logger.warning("Received a non-IS terminal type Subnegotiation")
    def negotiate(self, data: bytes) -> None:
        """
        Got negotiate data.
        """
        # First byte of the subnegotiation payload selects the option.
        command, payload = data[0:1], data[1:]
        if command == NAWS:
            self.naws(payload)
        elif command == TTYPE:
            self.ttype(payload)
        else:
            logger.info("Negotiate (%r got bytes)", len(data))
    def _parse_coroutine(self) -> Generator[None, bytes, None]:
        """
        Parser state machine.
        Every 'yield' expression returns the next byte.
        """
        while True:
            d = yield
            if d == int2byte(0):
                pass  # NOP
            # Go to state escaped.
            elif d == IAC:
                d2 = yield
                if d2 == IAC:
                    # Escaped 0xFF: a literal data byte.
                    self.received_data(d2)
                # Handle simple commands.
                elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
                    self.command_received(d2, b"")
                # Handle IAC-[DO/DONT/WILL/WONT] commands.
                elif d2 in (DO, DONT, WILL, WONT):
                    d3 = yield
                    self.command_received(d2, d3)
                # Subnegotiation
                elif d2 == SB:
                    # Consume everything until next IAC-SE
                    data = []
                    while True:
                        d3 = yield
                        if d3 == IAC:
                            d4 = yield
                            if d4 == SE:
                                break
                            else:
                                data.append(d4)
                        else:
                            data.append(d3)
                    self.negotiate(b"".join(data))
            else:
                self.received_data(d)
    def feed(self, data: bytes) -> None:
        """
        Feed data to the parser.
        """
        # Iterating over `bytes` yields integers; convert each one back into a
        # single-byte `bytes` object for the state machine.
        for b in data:
            self._parser.send(int2byte(b))

View file

@ -0,0 +1,349 @@
"""
Telnet server.
"""
import asyncio
import contextvars # Requires Python3.7!
import socket
from asyncio import get_event_loop
from typing import Awaitable, Callable, List, Optional, Set, TextIO, Tuple, cast
from prompt_toolkit.application.current import create_app_session, get_app
from prompt_toolkit.application.run_in_terminal import run_in_terminal
from prompt_toolkit.data_structures import Size
from prompt_toolkit.formatted_text import AnyFormattedText, to_formatted_text
from prompt_toolkit.input import create_pipe_input
from prompt_toolkit.output.vt100 import Vt100_Output
from prompt_toolkit.renderer import print_formatted_text as print_formatted_text
from prompt_toolkit.styles import BaseStyle, DummyStyle
from .log import logger
from .protocol import (
DO,
ECHO,
IAC,
LINEMODE,
MODE,
NAWS,
SB,
SE,
SEND,
SUPPRESS_GO_AHEAD,
TTYPE,
WILL,
TelnetProtocolParser,
)
__all__ = [
"TelnetServer",
]
def int2byte(number: int) -> bytes:
    """Convert an integer in range(256) into a single-byte ``bytes`` object."""
    return bytes([number])
def _initialize_telnet(connection: socket.socket) -> None:
    """
    Send the initial telnet option negotiation over this connection.
    """
    logger.info("Initializing telnet connection")
    # Iac Do Linemode
    connection.send(IAC + DO + LINEMODE)
    # Suppress Go Ahead. (This seems important for Putty to do correct echoing.)
    # This will allow bi-directional operation.
    connection.send(IAC + WILL + SUPPRESS_GO_AHEAD)
    # Iac sb
    connection.send(IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE)
    # IAC Will Echo
    connection.send(IAC + WILL + ECHO)
    # Negotiate window size
    connection.send(IAC + DO + NAWS)
    # Negotiate terminal type
    # Assume the client will accept the negotiation with `IAC + WILL + TTYPE`
    connection.send(IAC + DO + TTYPE)
    # We can then select the first terminal type supported by the client,
    # which is generally the best type the client supports
    # The client should reply with a `IAC + SB + TTYPE + IS + ttype + IAC + SE`
    connection.send(IAC + SB + TTYPE + SEND + IAC + SE)
class _ConnectionStdout:
    """
    Wrapper around socket which provides `write` and `flush` methods for the
    Vt100_Output output.
    """
    def __init__(self, connection: socket.socket, encoding: str) -> None:
        self._encoding = encoding
        self._connection = connection
        self._errors = "strict"
        self._buffer: List[bytes] = []
    def write(self, data: str) -> None:
        # Telnet expects CRLF line endings.
        data = data.replace("\n", "\r\n")
        self._buffer.append(data.encode(self._encoding, errors=self._errors))
        # Every write is flushed immediately to the socket.
        self.flush()
    def isatty(self) -> bool:
        return True
    def flush(self) -> None:
        # Best effort: a failed send is only logged and the buffer is dropped,
        # so a broken connection cannot wedge the renderer.
        try:
            self._connection.send(b"".join(self._buffer))
        except socket.error as e:
            logger.warning("Couldn't send data over socket: %s" % e)
        self._buffer = []
    @property
    def encoding(self) -> str:
        return self._encoding
    @property
    def errors(self) -> str:
        return self._errors
class TelnetConnection:
"""
Class that represents one Telnet connection.
"""
def __init__(
self,
conn: socket.socket,
addr: Tuple[str, int],
interact: Callable[["TelnetConnection"], Awaitable[None]],
server: "TelnetServer",
encoding: str,
style: Optional[BaseStyle],
) -> None:
self.conn = conn
self.addr = addr
self.interact = interact
self.server = server
self.encoding = encoding
self.style = style
self._closed = False
self._ready = asyncio.Event()
self.vt100_output = None
# Create "Output" object.
self.size = Size(rows=40, columns=79)
# Initialize.
_initialize_telnet(conn)
# Create input.
self.vt100_input = create_pipe_input()
# Create output.
def get_size() -> Size:
return self.size
self.stdout = cast(TextIO, _ConnectionStdout(conn, encoding=encoding))
def data_received(data: bytes) -> None:
""" TelnetProtocolParser 'data_received' callback """
self.vt100_input.send_bytes(data)
def size_received(rows: int, columns: int) -> None:
""" TelnetProtocolParser 'size_received' callback """
self.size = Size(rows=rows, columns=columns)
if self.vt100_output is not None:
get_app()._on_resize()
def ttype_received(ttype: str) -> None:
""" TelnetProtocolParser 'ttype_received' callback """
self.vt100_output = Vt100_Output(
self.stdout, get_size, term=ttype, write_binary=False
)
self._ready.set()
self.parser = TelnetProtocolParser(data_received, size_received, ttype_received)
self.context: Optional[contextvars.Context] = None
async def run_application(self) -> None:
"""
Run application.
"""
def handle_incoming_data() -> None:
data = self.conn.recv(1024)
if data:
self.feed(data)
else:
# Connection closed by client.
logger.info("Connection closed by client. %r %r" % self.addr)
self.close()
# Add reader.
loop = get_event_loop()
loop.add_reader(self.conn, handle_incoming_data)
try:
# Wait for v100_output to be properly instantiated
await self._ready.wait()
with create_app_session(input=self.vt100_input, output=self.vt100_output):
self.context = contextvars.copy_context()
await self.interact(self)
except Exception as e:
print("Got %s" % type(e).__name__, e)
import traceback
traceback.print_exc()
raise
finally:
self.close()
def feed(self, data: bytes) -> None:
"""
Handler for incoming data. (Called by TelnetServer.)
"""
self.parser.feed(data)
def close(self) -> None:
"""
Closed by client.
"""
if not self._closed:
self._closed = True
self.vt100_input.close()
get_event_loop().remove_reader(self.conn)
self.conn.close()
def send(self, formatted_text: AnyFormattedText) -> None:
"""
Send text to the client.
"""
if self.vt100_output is None:
return
formatted_text = to_formatted_text(formatted_text)
print_formatted_text(
self.vt100_output, formatted_text, self.style or DummyStyle()
)
def send_above_prompt(self, formatted_text: AnyFormattedText) -> None:
"""
Send text to the client.
This is asynchronous, returns a `Future`.
"""
formatted_text = to_formatted_text(formatted_text)
return self._run_in_terminal(lambda: self.send(formatted_text))
def _run_in_terminal(self, func: Callable[[], None]) -> None:
    # Run `func` through `run_in_terminal` within the context captured in
    # `run_application`, so that text is printed above the application
    # that is active for this connection.
    if not self.context:
        raise RuntimeError("Called _run_in_terminal outside `run_application`.")
    self.context.run(run_in_terminal, func)
def erase_screen(self) -> None:
    """
    Erase the client's screen and park the cursor at the top-left corner.

    Does nothing as long as no `Vt100_Output` has been negotiated yet.
    """
    output = self.vt100_output
    if output is None:
        return

    output.erase_screen()
    output.cursor_goto(0, 0)
    output.flush()
async def _dummy_interact(connection: TelnetConnection) -> None:
    # Default `interact` callback for `TelnetServer`: do nothing and end
    # the session immediately.
    pass
class TelnetServer:
    """
    Telnet server implementation.

    Listens on `host`:`port` and runs the `interact` coroutine for every
    incoming connection.

    :param host: Interface to bind to.
    :param port: TCP port to listen on.
    :param interact: Coroutine that is run for every `TelnetConnection`.
    :param encoding: Encoding for the client's input/output.
    :param style: Optional `BaseStyle` used when printing formatted text.
    """

    def __init__(
        self,
        host: str = "127.0.0.1",
        port: int = 23,
        interact: Callable[[TelnetConnection], Awaitable[None]] = _dummy_interact,
        encoding: str = "utf-8",
        style: Optional[BaseStyle] = None,
    ) -> None:
        self.host = host
        self.port = port
        self.interact = interact
        self.encoding = encoding
        self.style = style
        # One task per running `interact` coroutine.
        self._application_tasks: List[asyncio.Task] = []

        self.connections: Set[TelnetConnection] = set()
        self._listen_socket: Optional[socket.socket] = None

    @classmethod
    def _create_socket(cls, host: str, port: int) -> socket.socket:
        # Create and bind the listen socket. SO_REUSEADDR avoids "address
        # already in use" errors on quick restarts.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(4)
        return s

    def start(self) -> None:
        """
        Start the telnet server.
        Don't forget to call `loop.run_forever()` after doing this.
        """
        self._listen_socket = self._create_socket(self.host, self.port)
        logger.info(
            "Listening for telnet connections on %s port %r", self.host, self.port
        )

        get_event_loop().add_reader(self._listen_socket, self._accept)

    async def stop(self) -> None:
        """
        Stop listening for new connections and cancel all running
        applications, waiting for them to finish.
        """
        if self._listen_socket:
            get_event_loop().remove_reader(self._listen_socket)
            self._listen_socket.close()

        # Wait for all applications to finish.
        for t in self._application_tasks:
            t.cancel()

        for t in self._application_tasks:
            try:
                await t
            except asyncio.CancelledError:
                # Bug fix: we just cancelled these tasks ourselves, so
                # awaiting them raises `CancelledError`. Without this
                # handler, `stop()` would blow up on the first task and
                # never await the rest.
                pass

    def _accept(self) -> None:
        """
        Accept new incoming connection.
        """
        if self._listen_socket is None:
            return  # Should not happen. `_accept` is called after `start`.

        conn, addr = self._listen_socket.accept()
        logger.info("New connection %r %r", *addr)

        connection = TelnetConnection(
            conn, addr, self.interact, self, encoding=self.encoding, style=self.style
        )
        self.connections.add(connection)

        # Run application for this connection.
        async def run() -> None:
            logger.info("Starting interaction %r %r", *addr)
            try:
                await connection.run_application()
            except Exception as e:
                print(e)
            finally:
                self.connections.remove(connection)
                self._application_tasks.remove(task)
                logger.info("Stopping interaction %r %r", *addr)

        task = get_event_loop().create_task(run())
        self._application_tasks.append(task)

View file

@ -0,0 +1,10 @@
from typing import NamedTuple
__all__ = [
    "Point",
    "Size",
]


class Point(NamedTuple):
    """Immutable (x, y) position, in screen coordinates."""

    x: int
    y: int


class Size(NamedTuple):
    """Immutable terminal dimensions: (rows, columns)."""

    rows: int
    columns: int

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,17 @@
from enum import Enum
class EditingMode(Enum):
    # The set of key bindings that is active (Vi or Emacs style).
    VI = "VI"
    EMACS = "EMACS"


#: Name of the search buffer.
SEARCH_BUFFER = "SEARCH_BUFFER"

#: Name of the default buffer.
DEFAULT_BUFFER = "DEFAULT_BUFFER"

#: Name of the system buffer.
SYSTEM_BUFFER = "SYSTEM_BUFFER"

View file

@ -0,0 +1,26 @@
from .async_generator import generator_to_async_generator
from .inputhook import (
InputHookContext,
InputHookSelector,
new_eventloop_with_inputhook,
set_eventloop_with_inputhook,
)
from .utils import (
call_soon_threadsafe,
get_traceback_from_context,
run_in_executor_with_context,
)
# Public API of `prompt_toolkit.eventloop`.
__all__ = [
    # Async generator
    "generator_to_async_generator",
    # Utils.
    "run_in_executor_with_context",
    "call_soon_threadsafe",
    "get_traceback_from_context",
    # Inputhooks.
    "new_eventloop_with_inputhook",
    "set_eventloop_with_inputhook",
    "InputHookSelector",
    "InputHookContext",
]

View file

@ -0,0 +1,124 @@
"""
@asynccontextmanager code, copied from Python 3.7's contextlib.
For usage in Python 3.6.
"""
import abc
from functools import wraps
import _collections_abc
# Only the decorator itself is public; the helper classes are internal.
__all__ = ["asynccontextmanager"]
class AbstractAsyncContextManager(abc.ABC):
    """An abstract base class for asynchronous context managers."""

    async def __aenter__(self):
        """Return `self` upon entering the runtime context."""
        return self

    @abc.abstractmethod
    async def __aexit__(self, exc_type, exc_value, traceback):
        """Raise any exception triggered within the runtime context."""
        return None

    @classmethod
    def __subclasshook__(cls, C):
        # Structural check: any class providing both `__aenter__` and
        # `__aexit__` counts as a virtual subclass.
        # NOTE(review): relies on the private `_collections_abc` helper,
        # mirroring CPython's own implementation of this backport.
        if cls is AbstractAsyncContextManager:
            return _collections_abc._check_methods(C, "__aenter__", "__aexit__")
        return NotImplemented
class _GeneratorContextManagerBase:
    """Shared functionality for @contextmanager and @asynccontextmanager."""

    def __init__(self, func, args, kwds):
        self.gen = func(*args, **kwds)
        self.func = func
        self.args = args
        self.kwds = kwds

        # Issue 19330: give context manager instances a useful docstring.
        # Fall back to the class docstring when the wrapped function has
        # none. This still does not help `pydoc`, which bypasses the
        # instance docstring and shows the class docstring instead; see
        # http://bugs.python.org/issue19404 for details.
        func_doc = getattr(func, "__doc__", None)
        self.__doc__ = type(self).__doc__ if func_doc is None else func_doc
class _AsyncGeneratorContextManager(
    _GeneratorContextManagerBase, AbstractAsyncContextManager
):
    """Helper for @asynccontextmanager."""

    async def __aenter__(self):
        # Advance the generator to its `yield`; the yielded value becomes
        # the `as` target of the `async with` statement.
        try:
            return await self.gen.__anext__()
        except StopAsyncIteration:
            raise RuntimeError("generator didn't yield") from None

    async def __aexit__(self, typ, value, traceback):
        if typ is None:
            # Normal exit: resume the generator; it must finish now.
            try:
                await self.gen.__anext__()
            except StopAsyncIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need an exception instance to throw into the generator.
                value = typ()
            # See _GeneratorContextManager.__exit__ for comments on subtleties
            # in this implementation
            try:
                await self.gen.athrow(typ, value, traceback)
                raise RuntimeError("generator didn't stop after athrow()")
            except StopAsyncIteration as exc:
                # Suppress the exception iff the generator exited by
                # raising a *different* StopAsyncIteration than the one
                # that was thrown in.
                return exc is not value
            except RuntimeError as exc:
                if exc is value:
                    return False
                # Avoid suppressing if a StopIteration exception
                # was passed to throw() and later wrapped into a RuntimeError
                # (see PEP 479 for sync generators; async generators also
                # have this behavior). But do this only if the exception wrapped
                # by the RuntimeError is actually Stop(Async)Iteration (see
                # issue29692).
                if isinstance(value, (StopIteration, StopAsyncIteration)):
                    if exc.__cause__ is value:
                        return False
                raise
            except BaseException as exc:
                # Re-raise anything the generator raised itself; falling
                # through (implicit None) means "don't suppress".
                if exc is not value:
                    raise
def asynccontextmanager(func):
    """@asynccontextmanager decorator.

    Wrap an async generator function so that calling it produces an
    asynchronous context manager.

    Typical usage:

        @asynccontextmanager
        async def some_async_generator(<arguments>):
            <setup>
            try:
                yield <value>
            finally:
                <cleanup>

    which makes:

        async with some_async_generator(<arguments>) as <variable>:
            <body>

    equivalent to running <setup>, binding <variable> to <value>,
    executing <body>, and finally running <cleanup>.
    """

    @wraps(func)
    def make_context_manager(*args, **kwds):
        return _AsyncGeneratorContextManager(func, args, kwds)

    return make_context_manager

View file

@ -0,0 +1,67 @@
"""
Implementation for async generators.
"""
from asyncio import Queue, get_event_loop
from typing import AsyncGenerator, Callable, Iterable, TypeVar, Union
from .utils import run_in_executor_with_context
__all__ = [
    "generator_to_async_generator",
]

# Generic item type yielded by the wrapped iterable.
_T = TypeVar("_T")
class _Done:
    # Sentinel pushed onto the queue to signal that the background
    # generator is exhausted.
    pass
async def generator_to_async_generator(
    get_iterable: Callable[[], Iterable[_T]]
) -> AsyncGenerator[_T, None]:
    """
    Turn a generator or iterable into an async generator.

    This works by running the generator in a background thread.

    :param get_iterable: Function that returns a generator or iterable when
        called.
    """
    quitting = False
    _done = _Done()
    # Items produced by the worker thread, terminated by the `_done`
    # sentinel. All puts happen on the event-loop thread via
    # `call_soon_threadsafe`, so the asyncio Queue is used safely.
    q: Queue[Union[_T, _Done]] = Queue()
    loop = get_event_loop()

    def runner() -> None:
        """
        Consume the generator in background thread.
        When items are received, they'll be pushed to the queue.
        """
        try:
            for item in get_iterable():
                loop.call_soon_threadsafe(q.put_nowait, item)

                # When this async generator was cancelled (closed), stop this
                # thread.
                if quitting:
                    break
        finally:
            # Always signal completion, even when the iterable raised.
            loop.call_soon_threadsafe(q.put_nowait, _done)

    # Start background thread.
    run_in_executor_with_context(runner)

    try:
        while True:
            item = await q.get()

            if isinstance(item, _Done):
                break
            else:
                yield item
    finally:
        # When this async generator is closed (GeneratorExit exception, stop
        # the background thread as well. - we don't need that anymore.)
        # NOTE(review): `quitting` is read from the worker thread without
        # synchronization; the worker may still produce a few items before
        # it observes the flag.
        quitting = True

View file

@ -0,0 +1,46 @@
"""
Dummy contextvars implementation, to make prompt_toolkit work on Python 3.6.
As long as there is only one application running at a time, we don't need the
real contextvars. So, stuff like the telnet-server and so on requires 3.7.
"""
from typing import Any, Callable, Generic, Optional, TypeVar
def copy_context() -> "Context":
    """Return a new `Context`. The dummy has no real state to copy."""
    return Context()


_T = TypeVar("_T")


class Context:
    """Dummy replacement for `contextvars.Context`: runs callables directly."""

    def run(self, callable: Callable[..., _T], *args: Any, **kwargs: Any) -> _T:
        return callable(*args, **kwargs)


class Token(Generic[_T]):
    """Dummy replacement for `contextvars.Token`; carries no state."""


class ContextVar(Generic[_T]):
    """
    Dummy replacement for `contextvars.ContextVar`.

    There is only one implicit, global "context", so getting and setting
    simply read and write a single attribute. A stored value of `None` is
    treated as "not set".
    """

    def __init__(self, name: str, *, default: Optional[_T] = None) -> None:
        self._name = name
        self._value = default

    @property
    def name(self) -> str:
        return self._name

    def get(self, default: Optional[_T] = None) -> _T:
        # Bug fix: this used `self._value or default`, which silently
        # discarded explicitly-set falsy values (0, "", False) and fell
        # back to `default`. Only `None` means "not set".
        result = self._value if self._value is not None else default

        if result is None:
            raise LookupError
        return result

    def set(self, value: _T) -> Token[_T]:
        self._value = value
        return Token()

    def reset(self, token: Token[_T]) -> None:
        # Dummy: previous values are not remembered, so reset is a no-op.
        pass

View file

@ -0,0 +1,170 @@
"""
Similar to `PyOS_InputHook` of the Python API, we can plug in an input hook in
the asyncio event loop.
The way this works is by using a custom 'selector' that runs the other event
loop until the real selector is ready.
It's the responsibility of this event hook to return when there is input ready.
There are two ways to detect when input is ready:
The inputhook itself is a callable that receives an `InputHookContext`. This
callable should run the other event loop, and return when the main loop has
stuff to do. There are two ways to detect when to return:
- Call the `input_is_ready` method periodically. Quit when this returns `True`.
- Add the `fileno` as a watch to the external eventloop. Quit when file descriptor
becomes readable. (But don't read from it.)
Note that this is not the same as checking for `sys.stdin.fileno()`. The
eventloop of prompt-toolkit allows thread-based executors, for example for
asynchronous autocompletion. When the completion for instance is ready, we
also want prompt-toolkit to gain control again in order to display that.
"""
import asyncio
import os
import select
import selectors
import threading
from asyncio import AbstractEventLoop, get_event_loop
from selectors import BaseSelector
from typing import Callable
from prompt_toolkit.utils import is_windows
# Public API of the inputhook integration.
__all__ = [
    "new_eventloop_with_inputhook",
    "set_eventloop_with_inputhook",
    "InputHookSelector",
    "InputHookContext",
]
def new_eventloop_with_inputhook(
    inputhook: Callable[["InputHookContext"], None]
) -> AbstractEventLoop:
    """
    Create a new asyncio event loop whose selector runs `inputhook`
    (e.g. another GUI event loop) while waiting for I/O.
    """
    hook_selector = InputHookSelector(selectors.DefaultSelector(), inputhook)
    return asyncio.SelectorEventLoop(hook_selector)
def set_eventloop_with_inputhook(
    inputhook: Callable[["InputHookContext"], None]
) -> AbstractEventLoop:
    """
    Create a new event loop with the given inputhook, install it as the
    current event loop, and return it.
    """
    new_loop = new_eventloop_with_inputhook(inputhook)
    asyncio.set_event_loop(new_loop)
    return new_loop
class InputHookSelector(BaseSelector):
    """
    Usage:

        selector = selectors.SelectSelector()
        loop = asyncio.SelectorEventLoop(InputHookSelector(selector, inputhook))
        asyncio.set_event_loop(loop)

    Wraps a real selector and runs `inputhook` while waiting for the
    wrapped selector to become ready (in a background thread).
    """

    def __init__(
        self, selector: BaseSelector, inputhook: Callable[["InputHookContext"], None]
    ) -> None:
        self.selector = selector
        self.inputhook = inputhook
        # Self-pipe: the background select thread writes one byte so that
        # the inputhook can detect readiness by watching `self._r`.
        self._r, self._w = os.pipe()

    def register(self, fileobj, events, data=None):
        return self.selector.register(fileobj, events, data=data)

    def unregister(self, fileobj):
        return self.selector.unregister(fileobj)

    def modify(self, fileobj, events, data=None):
        # Bug fix: this used to pass `data=None`, silently dropping the
        # caller's `data` on every modification.
        return self.selector.modify(fileobj, events, data=data)

    def select(self, timeout=None):
        # If there are tasks in the current event loop,
        # don't run the input hook.
        if len(get_event_loop()._ready) > 0:
            return self.selector.select(timeout=timeout)

        ready = False
        result = None

        # Run selector in other thread.
        def run_selector() -> None:
            nonlocal ready, result
            result = self.selector.select(timeout=timeout)
            os.write(self._w, b"x")
            ready = True

        th = threading.Thread(target=run_selector)
        th.start()

        def input_is_ready() -> bool:
            return ready

        # Call inputhook.
        # The inputhook function is supposed to return when our selector
        # becomes ready. The inputhook can do that by registering the fd in its
        # own loop, or by checking the `input_is_ready` function regularly.
        self.inputhook(InputHookContext(self._r, input_is_ready))

        # Flush the read end of the pipe.
        try:
            # Before calling 'os.read', call select.select. This is required
            # when the gevent monkey patch has been applied. 'os.read' is never
            # monkey patched and won't be cooperative, so that would block all
            # other select() calls otherwise.
            # See: http://www.gevent.org/gevent.os.html

            # Note: On Windows, this is apparently not an issue.
            #       However, if we would ever want to add a select call, it
            #       should use `windll.kernel32.WaitForMultipleObjects`,
            #       because `select.select` can't wait for a pipe on Windows.
            if not is_windows():
                select.select([self._r], [], [], None)

            os.read(self._r, 1024)
        except OSError:
            # This happens when the window resizes and a SIGWINCH was received.
            # We get 'Error: [Errno 4] Interrupted system call'
            # Just ignore.
            pass

        # Wait for the real selector to be done.
        th.join()

        return result

    def close(self) -> None:
        """
        Clean up resources.
        """
        if self._r:
            os.close(self._r)
            os.close(self._w)
            self._r = self._w = -1

        self.selector.close()

    def get_map(self):
        return self.selector.get_map()
class InputHookContext:
    """
    Passed as the single argument to the inputhook callable.

    Exposes the file descriptor to watch, plus a poll function that tells
    the hook when it should return control to prompt_toolkit.
    """

    def __init__(self, fileno: int, input_is_ready: Callable[[], bool]) -> None:
        self._fileno = fileno
        self.input_is_ready = input_is_ready

    def fileno(self) -> int:
        """Return the fd that becomes readable when input is ready."""
        return self._fileno

View file

@ -0,0 +1,100 @@
import sys
import time
from asyncio import AbstractEventLoop, get_event_loop
from types import TracebackType
from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar
try:
import contextvars
except ImportError:
from . import dummy_contextvars as contextvars # type: ignore
__all__ = [
    "run_in_executor_with_context",
    "call_soon_threadsafe",
    "get_traceback_from_context",
]

# Generic return type for functions executed in an executor.
_T = TypeVar("_T")
def run_in_executor_with_context(
    func: Callable[..., _T], *args: Any, loop: Optional[AbstractEventLoop] = None
) -> Awaitable[_T]:
    """
    Run a function in an executor, but make sure it uses the same contextvars.
    This is required so that the function will see the right application.

    See also: https://bugs.python.org/issue34014
    """
    if loop is None:
        loop = get_event_loop()
    context: contextvars.Context = contextvars.copy_context()

    # `Context.run` executes `func` inside the copied context, so the
    # worker thread observes the same context variables as the caller.
    return loop.run_in_executor(None, context.run, func, *args)
def call_soon_threadsafe(
    func: Callable[[], None],
    max_postpone_time: Optional[float] = None,
    loop: Optional[AbstractEventLoop] = None,
) -> None:
    """
    Wrapper around asyncio's `call_soon_threadsafe`.

    This takes a `max_postpone_time` which can be used to tune the urgency of
    the method.

    Asyncio runs tasks in first-in-first-out. However, this is not what we
    want for the render function of the prompt_toolkit UI. Rendering is
    expensive, but since the UI is invalidated very often, in some situations
    we render the UI too often, so much that the rendering CPU usage slows down
    the rest of the processing of the application.  (Pymux is an example where
    we have to balance the CPU time spend on rendering the UI, and parsing
    process output.)
    However, we want to set a deadline value, for when the rendering should
    happen. (The UI should stay responsive).

    :param func: Callback to schedule.
    :param max_postpone_time: Maximum number of seconds `func` may be
        postponed while the loop still has other work queued. `None` means
        "schedule immediately".
    :param loop: Event loop to schedule on; defaults to the current one.
    """
    loop2 = loop or get_event_loop()

    # If no `max_postpone_time` has been given, schedule right now.
    if max_postpone_time is None:
        loop2.call_soon_threadsafe(func)
        return

    max_postpone_until = time.time() + max_postpone_time

    def schedule() -> None:
        # When there are no other tasks scheduled in the event loop. Run it
        # now.
        # Notice: uvloop doesn't have this _ready attribute. In that case,
        #         always call immediately.
        if not getattr(loop2, "_ready", []):  # type: ignore
            func()
            return

        # If the timeout expired, run this now.
        if time.time() > max_postpone_until:
            func()
            return

        # Schedule again for later: this re-enqueues `schedule` at the back
        # of the loop's queue, letting other ready callbacks run first.
        loop2.call_soon_threadsafe(schedule)

    loop2.call_soon_threadsafe(schedule)
def get_traceback_from_context(context: Dict[str, Any]) -> Optional[TracebackType]:
    """
    Get the traceback object from the context.
    (`context` is the dict passed to an asyncio exception handler.)
    """
    exc = context.get("exception")
    if not exc:
        return None

    if hasattr(exc, "__traceback__"):
        return exc.__traceback__

    # call_exception_handler() is usually called indirectly
    # from an except block. If it's not the case, the traceback
    # is undefined...
    return sys.exc_info()[2]

View file

@ -0,0 +1,61 @@
from ctypes import pointer, windll
from ctypes.wintypes import BOOL, DWORD, HANDLE
from typing import List, Optional
from prompt_toolkit.win32_types import SECURITY_ATTRIBUTES
__all__ = ["wait_for_handles", "create_win32_event"]

# Return value of WaitForMultipleObjects when the timeout interval elapsed.
WAIT_TIMEOUT = 0x00000102

# Timeout value meaning "wait forever".
INFINITE = -1
def wait_for_handles(
    handles: List[HANDLE], timeout: int = INFINITE
) -> Optional[HANDLE]:
    """
    Waits for multiple handles. (Similar to 'select') Returns the handle which is ready.
    Returns `None` on timeout.
    http://msdn.microsoft.com/en-us/library/windows/desktop/ms687025(v=vs.85).aspx

    Note that handles should be a list of `HANDLE` objects, not integers. See
    this comment in the patch by @quark-zju for the reason why:

        ''' Make sure HANDLE on Windows has a correct size

        Previously, the type of various HANDLEs are native Python integer
        types. The ctypes library will treat them as 4-byte integer when used
        in function arguments. On 64-bit Windows, HANDLE is 8-byte and usually
        a small integer. Depending on whether the extra 4 bytes are zero-ed out
        or not, things can happen to work, or break. '''

    This function returns either `None` or one of the given `HANDLE` objects.
    (The return value can be tested with the `is` operator.)

    :param handles: List of `HANDLE` objects to wait on.
    :param timeout: Timeout in milliseconds; `INFINITE` waits forever.
    """
    # Build a C array of HANDLEs so ctypes passes correctly-sized values.
    arrtype = HANDLE * len(handles)
    handle_array = arrtype(*handles)

    # bWaitAll=False: return as soon as *any* handle is signaled. The
    # return value is the index of that handle (WAIT_OBJECT_0 == 0).
    ret = windll.kernel32.WaitForMultipleObjects(
        len(handle_array), handle_array, BOOL(False), DWORD(timeout)
    )
    if ret == WAIT_TIMEOUT:
        return None
    else:
        return handles[ret]
def create_win32_event() -> HANDLE:
    """
    Creates a Win32 unnamed Event .
    http://msdn.microsoft.com/en-us/library/windows/desktop/ms682396(v=vs.85).aspx

    The caller is responsible for closing the returned handle.
    """
    return HANDLE(
        windll.kernel32.CreateEventA(
            pointer(SECURITY_ATTRIBUTES()),
            BOOL(True),  # Manual reset event.
            BOOL(False),  # Initial state.
            None,  # Unnamed event object.
        )
    )

Some files were not shown because too many files have changed in this diff Show more