Uploaded Test files

Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions


@@ -0,0 +1,917 @@
"""
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
"""
import os
import sys
import warnings
from functools import wraps
import parso
from parso.python import tree
from jedi._compatibility import force_unicode, cast_path, is_py3
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.file_io import KnownContentFileIO
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
from jedi.api.helpers import validate_line_column
from jedi.api.completion import Completion, search_in_module
from jedi.api.keywords import KeywordName
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project, Project
from jedi.api.errors import parso_to_jedi_errors
from jedi.api import refactoring
from jedi.api.refactoring.extract import extract_function, extract_variable
from jedi.inference import InferenceState
from jedi.inference import imports
from jedi.inference.references import find_references
from jedi.inference.arguments import try_iter_content
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.sys_path import transform_path_to_dotted
from jedi.inference.syntax_tree import tree_name_to_values
from jedi.inference.value import ModuleValue
from jedi.inference.base_value import ValueSet
from jedi.inference.value.iterable import unpack_tuple_to_dict
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.gradual.utils import load_proper_stub_module
from jedi.inference.utils import to_list
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
def _no_python2_support(func):
# TODO remove when removing Python 2/3.5
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._inference_state.grammar.version_info < (3, 6) or sys.version_info < (3, 6):
raise NotImplementedError(
"No support for refactorings/search on Python 2/3.5"
)
return func(self, *args, **kwargs)
return wrapper
class Script(object):
"""
A Script is the base for completions, goto or whatever you want to do with
Jedi. The counter part of this class is :class:`Interpreter`, which works
with actual dictionaries and can work with a REPL. This class
should be used when a user edits code in an editor.
You can either use the ``code`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The Script's ``sys.path`` is very customizable:
- If `project` is provided with a ``sys_path``, that is going to be used.
- If `environment` is provided, its ``sys.path`` will be used
(see :func:`Environment.get_sys_path <jedi.api.environment.Environment.get_sys_path>`);
- Otherwise ``sys.path`` will match that of the default environment of
Jedi, which typically matches the sys path that was used at the time
when Jedi was imported.
Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are
always 1-based and columns are always zero-based. To avoid repetition they
are not always documented. You can omit both line and column. Jedi will
then just do whatever action you are calling at the end of the file. If you
provide only the line, the column defaults to the end of that line.
.. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means
that parso reuses modules (i.e. they are not immutable). With this setting
Jedi is **not thread safe** and it is also not safe to use multiple
:class:`.Script` instances and their definitions at the same time.
If you are a normal plugin developer this should not be an issue. It is
an issue for people that do more complex stuff with Jedi.
This is purely a performance optimization and works pretty well for all
typical usages; however, consider turning the setting off if it causes
you problems. See also
`this discussion <https://github.com/davidhalter/jedi/issues/1240>`_.
:param code: The source code of the current file, separated by newlines.
:type code: str
:param line: Deprecated, please use it directly on e.g. ``.complete``
:type line: int
:param column: Deprecated, please use it directly on e.g. ``.complete``
:type column: int
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or None
:param encoding: Deprecated, cast to unicode yourself. The encoding of
``code``, if it is not a ``unicode`` object (default ``'utf-8'``).
:type encoding: str
:param sys_path: Deprecated, use the project parameter.
:type sys_path: typing.List[str]
:param Environment environment: Provide a predefined :ref:`Environment <environments>`
to work with a specific Python version or virtualenv.
:param Project project: Provide a :class:`.Project` to make sure finding
references works well, because the right folder is searched. There are
also ways to modify the sys path and other things.
"""
def __init__(self, code=None, line=None, column=None, path=None,
encoding=None, sys_path=None, environment=None,
project=None, source=None):
self._orig_path = path
# An empty path (also empty string) should always result in no path.
self.path = os.path.abspath(path) if path else None
if encoding is None:
encoding = 'utf-8'
else:
warnings.warn(
"Deprecated since version 0.17.0. You should cast to valid "
"unicode yourself, especially if you are not using utf-8.",
DeprecationWarning,
stacklevel=2
)
if line is not None:
warnings.warn(
"Providing the line is now done in the functions themselves "
"like `Script(...).complete(line, column)`",
DeprecationWarning,
stacklevel=2
)
if column is not None:
warnings.warn(
"Providing the column is now done in the functions themselves "
"like `Script(...).complete(line, column)`",
DeprecationWarning,
stacklevel=2
)
if source is not None:
code = source
warnings.warn(
"Use the code keyword argument instead.",
DeprecationWarning,
stacklevel=2
)
if code is None:
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
code = f.read()
if sys_path is not None and not is_py3:
sys_path = list(map(force_unicode, sys_path))
if project is None:
# Load the Python grammar of the current interpreter.
project = get_default_project(
os.path.dirname(self.path) if path else None
)
# TODO deprecate and remove sys_path from the Script API.
if sys_path is not None:
project._sys_path = sys_path
warnings.warn(
"Deprecated since version 0.17.0. Use the project API instead, "
"which means Script(project=Project(dir, sys_path=sys_path)) instead.",
DeprecationWarning,
stacklevel=2
)
self._inference_state = InferenceState(
project, environment=environment, script_path=self.path
)
debug.speed('init')
self._module_node, code = self._inference_state.parse_and_get_code(
code=code,
path=self.path,
encoding=encoding,
use_latest_grammar=path and path.endswith('.pyi'),
cache=False, # No disk cache, because the current script often changes.
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
debug.speed('parsed')
self._code_lines = parso.split_lines(code, keepends=True)
self._code = code
self._pos = line, column
cache.clear_time_caches()
debug.reset_time()
# Cache the module, this is mostly useful for testing, since this shouldn't
# be called multiple times.
@cache.memoize_method
def _get_module(self):
names = None
is_package = False
if self.path is not None:
import_names, is_p = transform_path_to_dotted(
self._inference_state.get_sys_path(add_parent_paths=False),
self.path
)
if import_names is not None:
names = import_names
is_package = is_p
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(cast_path(self.path), self._code)
if self.path is not None and self.path.endswith('.pyi'):
# We are in a stub file. Try to load the stub properly.
stub_module = load_proper_stub_module(
self._inference_state,
file_io,
names,
self._module_node
)
if stub_module is not None:
return stub_module
if names is None:
names = ('__main__',)
module = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=names,
code_lines=self._code_lines,
is_package=is_package,
)
if names[0] not in ('builtins', '__builtin__', 'typing'):
# These modules are essential for Jedi, so don't overwrite them.
self._inference_state.module_cache.add(names, ValueSet([module]))
return module
def _get_module_context(self):
return self._get_module().as_context()
def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._inference_state.environment,
)
@validate_line_column
def complete(self, line=None, column=None, **kwargs):
"""
Completes objects under the cursor.
Those objects contain information about the completions, more than just
names.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:return: Completion objects, sorted by name. Normal names appear
before "private" names that start with ``_`` and those appear
before magic methods and name mangled names that start with ``__``.
:rtype: list of :class:`.Completion`
"""
return self._complete(line, column, **kwargs)
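# A hedged example of ``complete``; positions are 1-based lines and 0-based
# columns, and the completion shown is only the likely first result:
#
#   >>> from jedi import Script
#   >>> script = Script("import collections\ncollections.named")
#   >>> c = script.complete(2, len("collections.named"))[0]
#   >>> c.name, c.complete   # probably ('namedtuple', 'tuple')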
def _complete(self, line, column, fuzzy=False): # Python 2...
with debug.increase_indent_cm('complete'):
completion = Completion(
self._inference_state, self._get_module_context(), self._code_lines,
(line, column), self.get_signatures, fuzzy=fuzzy,
)
return completion.complete()
def completions(self, fuzzy=False):
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).complete instead.",
DeprecationWarning,
stacklevel=2
)
return self.complete(*self._pos, fuzzy=fuzzy)
@validate_line_column
def infer(self, line=None, column=None, **kwargs):
"""
Return the definitions of the object under the cursor. It is basically a wrapper
around Jedi's type inference.
This method follows complicated paths and returns the end, not the
first definition. The big difference between :meth:`goto` and
:meth:`infer` is that :meth:`goto` doesn't
follow imports and statements. Multiple objects may be returned,
because, depending on an ``if`` branch, there can be two different versions
of a function.
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
with debug.increase_indent_cm('infer'):
return self._infer(line, column, **kwargs)
def goto_definitions(self, **kwargs):
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).infer instead.",
DeprecationWarning,
stacklevel=2
)
return self.infer(*self._pos, **kwargs)
def _infer(self, line, column, only_stubs=False, prefer_stubs=False):
pos = line, column
leaf = self._module_node.get_name_of_position(pos)
if leaf is None:
leaf = self._module_node.get_leaf_for_position(pos)
if leaf is None or leaf.type == 'string':
return []
context = self._get_module_context().create_context(leaf)
values = helpers.infer(self._inference_state, context, leaf)
values = convert_values(
values,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, c.name) for c in values]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))
def goto_assignments(self, follow_imports=False, follow_builtin_imports=False, **kwargs):
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).goto instead.",
DeprecationWarning,
stacklevel=2
)
return self.goto(*self._pos,
follow_imports=follow_imports,
follow_builtin_imports=follow_builtin_imports,
**kwargs)
@validate_line_column
def goto(self, line=None, column=None, **kwargs):
"""
Goes to the name that defined the object under the cursor. Optionally
you can follow imports.
Multiple objects may be returned because, depending on an ``if`` branch,
there can be two different versions of a function.
:param follow_imports: The method will follow imports.
:param follow_builtin_imports: If ``follow_imports`` is True will try
to look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
with debug.increase_indent_cm('goto'):
return self._goto(line, column, **kwargs)
def _goto(self, line, column, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Without a name we really just want to jump to the result e.g.
# executed by `foo()`, if the cursor is after `)`.
return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs)
name = self._get_module_context().create_name(tree_name)
# Make it possible to goto the super class function/attribute
# definitions, when they are overridden.
names = []
if name.tree_name.is_definition() and name.parent_context.is_class():
class_node = name.parent_context.tree_node
class_value = self._get_module_context().create_value(class_node)
mro = class_value.py__mro__()
next(mro) # Ignore the first entry, because it's the class itself.
for cls in mro:
names = cls.goto(tree_name.value)
if names:
break
if not names:
names = list(name.goto())
if follow_imports:
names = helpers.filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, d) for d in set(names)]
# Avoid duplicates
return list(set(helpers.sorted_definitions(defs)))
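# A hedged sketch contrasting ``goto`` and ``infer`` on the same position:
#
#   >>> from jedi import Script
#   >>> code = "def f():\n    pass\n\nalias = f\nalias()"
#   >>> script = Script(code)
#   >>> script.goto(5, 0)    # stops at the assignment ``alias = f``
#   >>> script.infer(5, 0)   # follows the assignment to the function ``f``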
@_no_python2_support
def search(self, string, **kwargs):
"""
Searches a name in the current file. For a description of what the
search string should look like, see
:meth:`.Project.search`.
:param bool all_scopes: Default False; searches not only for
definitions at the top level of a module, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search(string, **kwargs) # Python 2 ...
def _search(self, string, all_scopes=False):
return self._search_func(string, all_scopes=all_scopes)
@to_list
def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
names = self._names(all_scopes=all_scopes)
wanted_type, wanted_names = helpers.split_search_string(string)
return search_in_module(
self._inference_state,
self._get_module_context(),
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
fuzzy=fuzzy,
)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. If you want to
have all possible definitions in a file you can also provide an empty
string.
:param bool all_scopes: Default False; searches not only for
definitions at the top level of a module, but also in
functions and classes.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
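# A hedged example for ``search`` and ``complete_search``; the search-string
# syntax (optional type prefix plus name) is described in :meth:`.Project.search`:
#
#   >>> from jedi import Script
#   >>> code = "class Foo:\n    def foo_method(self):\n        pass\n"
#   >>> [n.name for n in Script(code).search('class Foo')]
#   >>> [c.name for c in Script(code).complete_search('foo_', all_scopes=True)]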
@validate_line_column
def help(self, line=None, column=None):
"""
Used to display a help window to users. Uses :meth:`.Script.goto` and
returns additional definitions for keywords and operators.
Typically you will want to display :meth:`.BaseName.docstring` to the
user for all the returned definitions.
The additional definitions are ``Name(...).type == 'keyword'``.
These definitions do not have a lot of value apart from their docstring
attribute, which contains the output of Python's :func:`help` function.
:rtype: list of :class:`.Name`
"""
definitions = self.goto(line, column, follow_imports=True)
if definitions:
return definitions
leaf = self._module_node.get_leaf_for_position((line, column))
if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
def need_pydoc():
if leaf.value in ('(', ')', '[', ']'):
if leaf.parent.type == 'trailer':
return False
if leaf.parent.type == 'atom':
return False
grammar = self._inference_state.grammar
# This parso stuff is not public, but since I control it, this
# is fine :-) ~dave
reserved = grammar._pgen_grammar.reserved_syntax_strings.keys()
return leaf.value in reserved
if need_pydoc():
name = KeywordName(self._inference_state, leaf.value)
return [classes.Name(self._inference_state, name)]
return []
def usages(self, **kwargs):
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).get_references instead.",
DeprecationWarning,
stacklevel=2
)
return self.get_references(*self._pos, **kwargs)
@validate_line_column
def get_references(self, line=None, column=None, **kwargs):
"""
Lists all references of a variable in a project. Since this can be
quite hard for Jedi to do, it will stop searching if the analysis
becomes too complicated.
:param include_builtins: Default ``True``. If ``False``, checks if a reference
is a builtin (e.g. ``sys``) and in that case does not return it.
:param scope: Default ``'project'``. If ``'file'``, include references in
the current module only.
:rtype: list of :class:`.Name`
"""
def _references(include_builtins=True, scope='project'):
if scope not in ('project', 'file'):
raise ValueError('Only the scopes "file" and "project" are allowed')
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Must be syntax
return []
names = find_references(self._get_module_context(), tree_name, scope == 'file')
definitions = [classes.Name(self._inference_state, n) for n in names]
if not include_builtins or scope == 'file':
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _references(**kwargs)
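# A hedged example of ``get_references``; ``scope='file'`` keeps the search
# inside the current module:
#
#   >>> from jedi import Script
#   >>> refs = Script("x = 1\ny = x + x\n").get_references(1, 0, scope='file')
#   >>> sorted((r.line, r.column) for r in refs)   # roughly [(1, 0), (2, 4), (2, 8)]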
def call_signatures(self):
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).get_signatures instead.",
DeprecationWarning,
stacklevel=2
)
return self.get_signatures(*self._pos)
@validate_line_column
def get_signatures(self, line=None, column=None):
"""
Return the function object of the call under the cursor.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
This would return an empty list.
:rtype: list of :class:`.Signature`
"""
pos = line, column
call_details = helpers.get_signature_details(self._module_node, pos)
if call_details is None:
return []
context = self._get_module_context().create_context(call_details.bracket_leaf)
definitions = helpers.cache_signatures(
self._inference_state,
context,
call_details.bracket_leaf,
self._code_lines,
pos
)
debug.speed('func_call followed')
# TODO here we use stubs instead of the actual values. We should use
# the signatures from stubs, but the actual values, probably?!
return [classes.Signature(self._inference_state, signature, call_details)
for signature in definitions.get_signatures()]
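# A hedged example of ``get_signatures`` while typing a call; the cursor sits
# right after the comma:
#
#   >>> from jedi import Script
#   >>> code = "def add(a, b=0):\n    return a + b\nadd(1, "
#   >>> sig = Script(code).get_signatures(3, len("add(1, "))[0]
#   >>> sig.to_string()   # roughly 'add(a, b=0)'
#   >>> sig.index         # 1, i.e. the cursor is on parameter ``b``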
@validate_line_column
def get_context(self, line=None, column=None):
"""
Returns the scope context under the cursor. This basically means the
function, class or module where the cursor is at.
:rtype: :class:`.Name`
"""
pos = (line, column)
leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
if leaf.start_pos > pos or leaf.type == 'endmarker':
previous_leaf = leaf.get_previous_leaf()
if previous_leaf is not None:
leaf = previous_leaf
module_context = self._get_module_context()
n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
# This is a bit of a special case. The context of a function/class
# name/param/keyword is always its parent context, not the
# function itself. Catch all the cases here where we are before the
# suite object, but still in the function.
context = module_context.create_value(n).as_context()
else:
context = module_context.create_context(leaf)
while context.name is None:
context = context.parent_context # comprehensions
definition = classes.Name(self._inference_state, context.name)
while definition.type != 'module':
name = definition._name # TODO private access
tree_name = name.tree_name
if tree_name is not None: # Happens with lambdas.
scope = tree_name.get_definition()
if scope.start_pos[1] < column:
break
definition = definition.parent()
return definition
def _analysis(self):
self._inference_state.is_analysis = True
self._inference_state.analysis_modules = [self._module_node]
module = self._get_module_context()
try:
for node in get_executable_nodes(self._module_node):
context = module.create_context(node)
if node.type in ('funcdef', 'classdef'):
# Resolve the decorators.
tree_name_to_values(self._inference_state, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():
import_names |= set(path[-1] for path in node.get_paths())
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.infer_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
else:
if node.type == 'name':
defs = self._inference_state.infer(context, node)
else:
defs = infer_call_of_leaf(context, node)
try_iter_content(defs)
self._inference_state.reset_recursion_limitations()
ana = [a for a in self._inference_state.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._inference_state.is_analysis = False
def get_names(self, **kwargs):
"""
Returns names defined in the current file.
:param all_scopes: If True lists the names of all scopes instead of
only the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
:rtype: list of :class:`.Name`
"""
names = self._names(**kwargs)
return [classes.Name(self._inference_state, n) for n in names]
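# A hedged example of ``get_names``; ``all_scopes=True`` also lists names
# defined inside functions and classes:
#
#   >>> from jedi import Script
#   >>> code = "import os\n\ndef f(x):\n    y = x\n"
#   >>> [n.name for n in Script(code).get_names()]            # roughly ['os', 'f']
#   >>> [n.name for n in Script(code).get_names(all_scopes=True)]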
def get_syntax_errors(self):
"""
Lists all syntax errors in the current file.
:rtype: list of :class:`.SyntaxError`
"""
return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)
def _names(self, all_scopes=False, definitions=True, references=False):
# Set line/column to a random position, because they don't matter.
module_context = self._get_module_context()
defs = [
module_context.create_name(name)
for name in helpers.get_module_names(
self._module_node,
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
]
return sorted(defs, key=lambda x: x.start_pos)
@_no_python2_support
def rename(self, line=None, column=None, **kwargs):
"""
Renames all references of the variable under the cursor.
:param new_name: The variable under the cursor will be renamed to this
string.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
return self._rename(line, column, **kwargs)
def _rename(self, line, column, new_name): # Python 2...
definitions = self.get_references(line, column, include_builtins=False)
return refactoring.rename(self._inference_state, definitions, new_name)
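# A hedged sketch of the rename workflow; ``get_diff`` and ``apply`` are
# assumed to be available on the returned :class:`.Refactoring` object:
#
#   >>> from jedi import Script
#   >>> script = Script("foo = 1\nbar = foo + 1\n", path='example.py')
#   >>> ref = script.rename(1, 0, new_name='count')
#   >>> print(ref.get_diff())   # unified diff of the rename
#   >>> # ref.apply()           # would write the change back to disk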
@_no_python2_support
def extract_variable(self, line, column, **kwargs):
"""
Moves an expression to a new statement.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
foo = 3.1
x = int(foo + 1)
the code above will become::
foo = 3.1
bar = foo + 1
x = int(bar)
:param new_name: The expression under the cursor will be renamed to
this string.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will try to determine the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will try to determine the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
return self._extract_variable(line, column, **kwargs) # Python 2...
@validate_line_column
def _extract_variable(self, line, column, new_name, until_line=None, until_column=None):
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_variable(
self._inference_state, self.path, self._module_node,
new_name, (line, column), until_pos
)
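# A hedged example of ``extract_variable`` matching the docstring above; with
# the cursor on ``foo`` in line 2, Jedi picks the expression range itself:
#
#   >>> from jedi import Script
#   >>> code = "foo = 3.1\nx = int(foo + 1)\n"
#   >>> ref = Script(code, path='example.py').extract_variable(2, 8, new_name='bar')
#   >>> print(ref.get_diff())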
@_no_python2_support
def extract_function(self, line, column, **kwargs):
"""
Moves an expression to a new function.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
global_var = 3
def x():
foo = 3.1
x = int(foo + 1 + global_var)
the code above will become::
global_var = 3
def bar(foo):
return int(foo + 1 + global_var)
def x():
foo = 3.1
x = bar(foo)
:param new_name: The expression under the cursor will be replaced with
a function with this name.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will try to determine the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will try to determine the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
return self._extract_function(line, column, **kwargs) # Python 2...
@validate_line_column
def _extract_function(self, line, column, new_name, until_line=None, until_column=None):
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_function(
self._inference_state, self.path, self._get_module_context(),
new_name, (line, column), until_pos
)
@_no_python2_support
def inline(self, line=None, column=None):
"""
Inlines a variable under the cursor. This is basically the opposite of
extracting a variable. For example with the cursor on bar::
foo = 3.1
bar = foo + 1
x = int(bar)
the code above will become::
foo = 3.1
x = int(foo + 1)
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
names = [d._name for d in self.get_references(line, column, include_builtins=True)]
return refactoring.inline(self._inference_state, names)
class Interpreter(Script):
"""
Jedi's API for Python REPLs.
Implements all of the methods that are present in :class:`.Script` as well.
In addition to completing actual objects the way normal REPL completion
does (like ``str.upper``), Jedi also supports code completion based on
static code analysis. For example, Jedi will complete ``str().upper``.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper
All keyword arguments are the same as the arguments for :class:`.Script`.
:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the one
returned by :func:`globals` and :func:`locals`.
"""
_allow_descriptor_getattr_default = True
def __init__(self, code, namespaces, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
super(Interpreter, self).__init__(code, environment=environment,
project=Project(os.getcwd()), **kwds)
self.namespaces = namespaces
self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default
@cache.memoize_method
def _get_module_context(self):
tree_module_value = ModuleValue(
self._inference_state, self._module_node,
file_io=KnownContentFileIO(self.path, self._code),
string_names=('__main__',),
code_lines=self._code_lines,
)
return interpreter.MixedModuleContext(
tree_module_value,
self.namespaces,
)
def names(source=None, path=None, encoding='utf-8', all_scopes=False,
definitions=True, references=False, environment=None):
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).get_names instead.",
DeprecationWarning,
stacklevel=2
)
return Script(source, path=path, encoding=encoding).get_names(
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of lazy parsing
of modules. This can be useful for IDEs, to control which modules to load
on startup.
:param modules: one or more module names, given as strings.
"""
for m in modules:
s = "import %s as x; x." % m
Script(s, path=None).complete(1, len(s))
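# A hedged usage sketch; the module names are illustrative:
#
#   >>> import jedi
#   >>> jedi.preload_module('numpy', 'os.path')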
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed


@@ -0,0 +1,925 @@
"""
There are a couple of classes documented in here:
- :class:`.BaseName` as an abstract base class for almost everything.
- :class:`.Name` used in a lot of places
- :class:`.Completion` for completions
- :class:`.BaseSignature` as a base class for signatures
- :class:`.Signature` for :meth:`.Script.get_signatures` only
- :class:`.ParamName` used for parameters of signatures
- :class:`.Refactoring` for refactorings
- :class:`.SyntaxError` for :meth:`.Script.get_syntax_errors` only
These classes make up the biggest part of the API, because they contain
the interesting information about all operations.
"""
import re
import sys
import warnings
from parso.python.tree import search_ancestor
from jedi import settings
from jedi import debug
from jedi.inference.utils import unite
from jedi.cache import memoize_method
from jedi.inference import imports
from jedi.inference.imports import ImportName
from jedi.inference.compiled.mixed import MixedName
from jedi.inference.gradual.typeshed import StubModuleValue
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.base_value import ValueSet
from jedi.api.keywords import KeywordName
from jedi.api import completion_cache
from jedi.api.helpers import filter_follow_imports
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(inference_state, context):
"""
List sub-definitions (e.g., methods in class).
:type scope: Scope
:rtype: list of Name
"""
filter = next(context.get_filters())
names = [name for name in filter.values()]
return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]
def _values_to_definitions(values):
return [Name(c.inference_state, c.name) for c in values]
class BaseName(object):
"""
The base class for all definitions, completions and signatures.
"""
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_collections': 'collections',
'_socket': 'socket',
'_sqlite3': 'sqlite3',
'__builtin__': 'builtins',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
def __init__(self, inference_state, name):
self._inference_state = inference_state
self._name = name
"""
An instance of :class:`parso.python.tree.Name` subclass.
"""
self.is_keyword = isinstance(self._name, KeywordName)
@memoize_method
def _get_module_context(self):
# This can take a while to complete, because in the worst case of
# imports (consider `import a` completions), we need to load all
# modules starting with a first.
return self._name.get_root_context()
@property
def module_path(self):
"""
Shows the file path of a module, e.g. ``/usr/lib/python2.7/os.py``.
:rtype: str or None
"""
module = self._get_module_context()
if module.is_stub() or not module.is_compiled():
# Compiled modules should not return a module path even if they
# have one.
return self._get_module_context().py__file__()
return None
@property
def name(self):
"""
Name of variable/function/class/module.
For example, for ``x = None`` it returns ``'x'``.
:rtype: str or None
"""
return self._name.get_public_name()
@property
def type(self):
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. As what is in ``variable`` is unambiguous
to Jedi, :meth:`jedi.Script.infer` should return a list of
definitions for ``keyword``, ``f``, ``C`` and ``x``.
>>> from jedi._compatibility import no_unicode_pprint
>>> from jedi import Script
>>> source = '''
... import keyword
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... for variable in [keyword, f, C, x]:
... variable'''
>>> script = Script(source)
>>> defs = script.infer()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> no_unicode_pprint(defs) # doctest: +NORMALIZE_WHITESPACE
[<Name full_name='keyword', description='module keyword'>,
<Name full_name='__main__.C', description='class C'>,
<Name full_name='__main__.D', description='instance D'>,
<Name full_name='__main__.f', description='def f'>]
Finally, here is what you can get from :attr:`type`:
>>> defs = [str(d.type) for d in defs] # It's unicode and in Py2 has u before it.
>>> defs[0]
'module'
>>> defs[1]
'class'
>>> defs[2]
'instance'
>>> defs[3]
'function'
Valid values for type are ``module``, ``class``, ``instance``, ``function``,
``param``, ``path``, ``keyword`` and ``statement``.
"""
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
# TODO move this to their respective names.
definition = tree_name.get_definition()
if definition is not None and definition.type == 'import_from' and \
tree_name.is_definition():
resolve = True
if isinstance(self._name, imports.SubModuleName) or resolve:
for value in self._name.infer():
return value.api_type
return self._name.api_type
@property
def module_name(self):
"""
The module name, a bit similar to what ``__name__`` is in a random
Python module.
>>> from jedi import Script
>>> source = 'import json'
>>> script = Script(source, path='example.py')
>>> d = script.infer()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
json
"""
return self._get_module_context().py__name__()
def in_builtin_module(self):
"""
Returns True, if this is a builtin module.
"""
value = self._get_module_context().get_value()
if isinstance(value, StubModuleValue):
return any(v.is_compiled() for v in value.non_stub_value_set)
return value.is_compiled()
@property
def line(self):
"""The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[0]
@property
def column(self):
"""The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[1]
def get_definition_start_position(self):
"""
The (row, column) of the start of the definition range. Rows start with
1, columns start with 0.
:rtype: Optional[Tuple[int, int]]
"""
if self._name.tree_name is None:
return None
definition = self._name.tree_name.get_definition()
if definition is None:
return self._name.start_pos
return definition.start_pos
def get_definition_end_position(self):
"""
The (row, column) of the end of the definition range. Rows start with
1, columns start with 0.
:rtype: Optional[Tuple[int, int]]
"""
if self._name.tree_name is None:
return None
definition = self._name.tree_name.get_definition()
if definition is None:
return self._name.tree_name.end_pos
if self.type in ("function", "class"):
last_leaf = definition.get_last_leaf()
if last_leaf.type == "newline":
return last_leaf.get_previous_leaf().end_pos
return last_leaf.end_pos
return definition.end_pos
def docstring(self, raw=False, fast=True):
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, path='example.py')
>>> doc = script.infer(1, len('def f'))[0].docstring()
>>> print(doc)
f(a, b=1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring, e.g. function signatures are prepended to their docstrings.
If you need the actual docstring, use ``raw=True`` instead.
>>> print(script.infer(1, len('def f'))[0].docstring(raw=True))
Document for function f.
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing ``import a`` is slow if you call
``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
if isinstance(self._name, ImportName) and fast:
return ''
doc = self._get_docstring()
if raw:
return doc
signature_text = self._get_docstring_signature()
if signature_text and doc:
return signature_text + '\n\n' + doc
else:
return signature_text + doc
def _get_docstring(self):
return self._name.py__doc__()
def _get_docstring_signature(self):
return '\n'.join(
signature.to_string()
for signature in self._get_signatures(for_docstring=True)
)
@property
def description(self):
"""
A description of the :class:`.Name` object, which is heavily used
in testing. e.g. for ``isinstance`` it returns ``def isinstance``.
Example:
>>> from jedi._compatibility import no_unicode_pprint
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f if random.choice([0,1]) else C'''
>>> script = Script(source) # line is maximum by default
>>> defs = script.infer(column=3)
>>> defs = sorted(defs, key=lambda d: d.line)
>>> no_unicode_pprint(defs) # doctest: +NORMALIZE_WHITESPACE
[<Name full_name='__main__.f', description='def f'>,
<Name full_name='__main__.C', description='class C'>]
>>> str(defs[0].description) # strip literals in python2
'def f'
>>> str(defs[1].description)
'class C'
"""
typ = self.type
tree_name = self._name.tree_name
if typ == 'param':
return typ + ' ' + self._name.to_string()
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
if typ == 'function':
# For the description we want a short and a pythonic way.
typ = 'def'
return typ + ' ' + self._name.get_public_name()
definition = tree_name.get_definition(include_setitem=True) or tree_name
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
# Delete comments:
txt = re.sub(r'#[^\n]+\n', ' ', txt)
# Delete multi spaces/newlines
txt = re.sub(r'\s+', ' ', txt).strip()
return txt
@property
def full_name(self):
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up the Python manual for the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, path='example.py')
>>> print(script.infer(3, len('os.path.join'))[0].full_name)
os.path.join
Notice that it returns ``'os.path.join'`` instead of (for example)
``'posixpath.join'``. This is not strictly correct, since the module's
name would be ``<module 'posixpath' ...>``. However, most users find
the dotted ``os.path`` form more practical.
"""
if not self._name.is_value_name:
return None
names = self._name.get_qualified_names(include_module_names=True)
if names is None:
return None
names = list(names)
try:
names[0] = self._mapping[names[0]]
except KeyError:
pass
return '.'.join(names)
def is_stub(self):
"""
Returns True if the current name is defined in a stub file.
"""
if not self._name.is_value_name:
return False
return self._name.get_root_context().is_stub()
def is_side_effect(self):
"""
Checks if a name is defined as ``self.foo = 3``. In the case of ``self``,
this function would return False; for ``foo`` it would return True.
"""
tree_name = self._name.tree_name
if tree_name is None:
return False
return tree_name.is_definition() and tree_name.parent.type == 'trailer'
def goto(self, **kwargs):
"""
Like :meth:`.Script.goto` (also supports the same params), but does it
for the current name. This is typically useful if you are using
something like :meth:`.Script.get_names()`.
:param follow_imports: The goto call will follow imports.
:param follow_builtin_imports: If follow_imports is True will try to
look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this goto call.
:rtype: list of :class:`Name`
"""
with debug.increase_indent_cm('goto for %s' % self._name):
return self._goto(**kwargs)
def goto_assignments(self, **kwargs): # Python 2...
warnings.warn(
"Deprecated since version 0.16.0. Use .goto.",
DeprecationWarning,
stacklevel=2
)
return self.goto(**kwargs)
def _goto(self, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
if not self._name.is_value_name:
return []
names = self._name.goto()
if follow_imports:
names = filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
return [self if n == self._name else Name(self._inference_state, n)
for n in names]
def infer(self, **kwargs): # Python 2...
"""
Like :meth:`.Script.infer`, it can be useful to understand which type
the current name has.
Return the actual definitions. I strongly recommend not using it for
your completions, because it might slow down |jedi|. If you want to
read only a few objects (<=20), it might be useful, especially to get
the original docstrings. The basic problem of this function is that it
follows all results. This means with 1000 completions (e.g. numpy),
it's just very, very slow.
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this type
inference call.
:rtype: list of :class:`Name`
"""
with debug.increase_indent_cm('infer for %s' % self._name):
return self._infer(**kwargs)
def _infer(self, only_stubs=False, prefer_stubs=False):
assert not (only_stubs and prefer_stubs)
if not self._name.is_value_name:
return []
# First we need to make sure that we have stub names (if possible) that
# we can follow. If we don't do that, we can end up with the inferred
# results of Python objects instead of stubs.
names = convert_names([self._name], prefer_stubs=True)
values = convert_values(
ValueSet.from_sets(n.infer() for n in names),
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
resulting_names = [c.name for c in values]
return [self if n == self._name else Name(self._inference_state, n)
for n in resulting_names]
@property
@memoize_method
def params(self):
warnings.warn(
"Deprecated since version 0.16.0. Use get_signatures()[...].params",
DeprecationWarning,
stacklevel=2
)
# Only return the first one. There might be multiple ones, especially
# with overloading.
for signature in self._get_signatures():
return [
Name(self._inference_state, n)
for n in signature.get_param_names(resolve_stars=True)
]
if self.type == 'function' or self.type == 'class':
# Fallback, if no signatures were defined (which is probably by
# itself a bug).
return []
raise AttributeError('There are no params defined on this.')
def parent(self):
"""
Returns the parent scope of this identifier.
:rtype: Name
"""
if not self._name.is_value_name:
return None
if self.type in ('function', 'class', 'param') and self._name.tree_name is not None:
# Since the parent_context doesn't really match what the user
# thinks the parent is here, we handle these cases separately.
# The reason for this is the following:
# - class: Nested classes parent_context is always the
# parent_context of the most outer one.
# - function: Functions in classes have the module as
# parent_context.
# - param: The parent_context of a param is not its function but
# e.g. the outer class or module.
cls_or_func_node = self._name.tree_name.get_definition()
parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input')
context = self._get_module_context().create_value(parent).as_context()
else:
context = self._name.parent_context
if context is None:
return None
while context.name is None:
# Happens for comprehension contexts
context = context.parent_context
return Name(self._inference_state, context.name)
def __repr__(self):
return "<%s %sname=%r, description=%r>" % (
self.__class__.__name__,
'full_' if self.full_name else '',
self.full_name or self.name,
self.description,
)
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
:return str: Returns the line(s) of code or an empty string if it's a
builtin.
"""
if not self._name.is_value_name:
return ''
lines = self._name.get_root_context().code_lines
if lines is None:
# Probably a builtin module, just ignore in that case.
return ''
index = self._name.start_pos[0] - 1
start_index = max(index - before, 0)
return ''.join(lines[start_index:index + after + 1])
def _get_signatures(self, for_docstring=False):
if for_docstring and self._name.api_type == 'statement' and not self.is_stub():
# For docstrings we don't resolve signatures if they are simple
# statements and not stubs. This is a speed optimization.
return []
if isinstance(self._name, MixedName):
# While this would eventually happen anyway, it's basically just a
# shortcut to not infer anything tree related, because it's really
# not necessary.
return self._name.infer_compiled_value().get_signatures()
names = convert_names([self._name], prefer_stubs=True)
return [sig for name in names for sig in name.infer().get_signatures()]
def get_signatures(self):
"""
Returns all potential signatures for a function or a class. Multiple
signatures are typical if you use Python stubs with ``@overload``.
:rtype: list of :class:`BaseSignature`
"""
return [
BaseSignature(self._inference_state, s)
for s in self._get_signatures()
]
def execute(self):
"""
Uses type inference to "execute" this identifier and returns the
executed objects.
:rtype: list of :class:`Name`
"""
return _values_to_definitions(self._name.infer().execute_with_values())
def get_type_hint(self):
"""
Returns type hints like ``Iterable[int]`` or ``Union[int, str]``.
This method might be quite slow, especially for functions. The problem
is finding executions for those functions to return something like
``Callable[[int, str], str]``.
:rtype: str
"""
return self._name.infer().get_type_hint()
class Completion(BaseName):
"""
``Completion`` objects are returned from :meth:`.Script.complete`. They
provide additional information about a completion.
"""
def __init__(self, inference_state, name, stack, like_name_length,
is_fuzzy, cached_name=None):
super(Completion, self).__init__(inference_state, name)
self._like_name_length = like_name_length
self._stack = stack
self._is_fuzzy = is_fuzzy
self._cached_name = cached_name
# Completion objects with the same Completion name (which means
# duplicate items in the completion)
self._same_name_completions = []
def _complete(self, like_name):
append = ''
if settings.add_bracket_after_function \
and self.type == 'function':
append = '('
name = self._name.get_public_name()
if like_name:
name = name[self._like_name_length:]
return name + append
@property
def complete(self):
"""
Only works with non-fuzzy completions. Returns None if fuzzy
completions are used.
Return the rest of the word, e.g. completing ``isinstance``::
isinstan# <-- Cursor is here
would return the string 'ce'. It also adds additional stuff, depending
on your ``settings.py``.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would give a ``Completion`` whose ``complete``
would be ``am=``.
"""
if self._is_fuzzy:
return None
return self._complete(True)
@property
def name_with_symbols(self):
"""
Similar to :attr:`.name`, but also includes trailing symbols,
for example assuming the following function definition::
def foo(param=0):
pass
completing ``foo(`` would give a ``Completion`` whose
``name_with_symbols`` would be ``param=``.
"""
return self._complete(False)
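# A hedged illustration of ``complete`` vs. ``name_with_symbols`` for the
# ``foo(par`` example above; the indexing assumes the param completion comes
# first:
#
#   >>> from jedi import Script
#   >>> code = "def foo(param=0):\n    pass\nfoo(par"
#   >>> c = Script(code).complete(3, len("foo(par"))[0]
#   >>> c.complete            # 'am='
#   >>> c.name_with_symbols   # 'param='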
def docstring(self, raw=False, fast=True):
"""
Documented under :meth:`BaseName.docstring`.
"""
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
# wouldn't load like > 100 Python modules anymore.
fast = False
return super(Completion, self).docstring(raw=raw, fast=fast)
def _get_docstring(self):
if self._cached_name is not None:
return completion_cache.get_docstring(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super(Completion, self)._get_docstring()
def _get_docstring_signature(self):
if self._cached_name is not None:
return completion_cache.get_docstring_signature(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super(Completion, self)._get_docstring_signature()
def _get_cache(self):
return (
super(Completion, self).type,
super(Completion, self)._get_docstring_signature(),
super(Completion, self)._get_docstring(),
)
@property
def type(self):
"""
Documented under :meth:`BaseName.type`.
"""
# Purely a speed optimization.
if self._cached_name is not None:
return completion_cache.get_type(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super(Completion, self).type
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
class Name(BaseName):
"""
*Name* objects are returned from many different APIs including
:meth:`.Script.goto` or :meth:`.Script.infer`.
"""
def __init__(self, inference_state, definition):
super(Name, self).__init__(inference_state, definition)
@property
def desc_with_module(self):
warnings.warn(
"Deprecated since version 0.17.0. No replacement for now, maybe .full_name helps",
DeprecationWarning,
stacklevel=2
)
return "%s:%s" % (self.module_name, self.description)
@memoize_method
def defined_names(self):
"""
List sub-definitions (e.g., methods in class).
:rtype: list of :class:`Name`
"""
defs = self._name.infer()
return sorted(
unite(defined_names(self._inference_state, d.as_context()) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
def is_definition(self):
"""
Returns True, if defined as a name in a statement, function or class.
Returns False, if it's a reference to such a definition.
"""
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
def __eq__(self, other):
return self._name.start_pos == other._name.start_pos \
and self.module_path == other.module_path \
and self.name == other.name \
and self._inference_state == other._inference_state
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name.start_pos, self.module_path, self.name, self._inference_state))
class BaseSignature(Name):
"""
These signatures are returned by :meth:`BaseName.get_signatures`
calls.
"""
def __init__(self, inference_state, signature):
super(BaseSignature, self).__init__(inference_state, signature.name)
self._signature = signature
@property
def params(self):
"""
Returns definitions for all parameters that a signature defines.
This includes stuff like ``*args`` and ``**kwargs``.
:rtype: list of :class:`.ParamName`
"""
return [ParamName(self._inference_state, n)
for n in self._signature.get_param_names(resolve_stars=True)]
def to_string(self):
"""
Returns a text representation of the signature. This could for example
look like ``foo(bar, baz: int, **kwargs)``.
:rtype: str
"""
return self._signature.to_string()
class Signature(BaseSignature):
"""
A full signature object is the return value of
:meth:`.Script.get_signatures`.
"""
def __init__(self, inference_state, signature, call_details):
super(Signature, self).__init__(inference_state, signature)
self._call_details = call_details
self._signature = signature
@property
def index(self):
"""
Returns the param index of the current cursor position.
Returns None if the index cannot be found in the current call.
:rtype: int
"""
return self._call_details.calculate_index(
self._signature.get_param_names(resolve_stars=True)
)
@property
def bracket_start(self):
"""
Returns a line/column tuple of the bracket that is responsible for the
last function call. The first line is 1 and the first column 0.
:rtype: int, int
"""
return self._call_details.bracket_leaf.start_pos
def __repr__(self):
return '<%s: index=%r %s>' % (
type(self).__name__,
self.index,
self._signature.to_string(),
)
class ParamName(Name):
def infer_default(self):
"""
Returns default values like the ``1`` of ``def foo(x=1):``.
:rtype: list of :class:`.Name`
"""
return _values_to_definitions(self._name.infer_default())
def infer_annotation(self, **kwargs):
"""
:param execute_annotation: Default True; If False, values are not
executed and classes are returned instead of instances.
:rtype: list of :class:`.Name`
"""
return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs))
def to_string(self):
"""
Returns a simple representation of a param, like
``f: Callable[..., Any]``.
:rtype: str
"""
return self._name.to_string()
@property
def kind(self):
"""
Returns an enum instance of :mod:`inspect`'s ``Parameter`` enum.
:rtype: :py:attr:`inspect.Parameter.kind`
"""
if sys.version_info < (3, 5):
raise NotImplementedError(
'Python 2 is end-of-life, the new feature is not available for it'
)
return self._name.get_kind()


@@ -0,0 +1,665 @@
import re
from textwrap import dedent
from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines
from jedi._compatibility import Parameter
from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance, ModuleValue
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager
class ParamNameWithEquals(ParamNameWrapper):
def get_public_name(self):
return self.string_name + '='
def _get_signature_param_names(signatures, positional_count, used_kwargs):
# Add named params
for call_sig in signatures:
for i, p in enumerate(call_sig.params):
# Allow protected access, because it's a public API.
# TODO reconsider with Python 2 drop
kind = p._name.get_kind()
if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
continue
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
and p.name not in used_kwargs:
yield ParamNameWithEquals(p._name)
def _must_be_kwarg(signatures, positional_count, used_kwargs):
if used_kwargs:
return True
must_be_kwarg = True
for signature in signatures:
for i, p in enumerate(signature.params):
# TODO reconsider with Python 2 drop
kind = p._name.get_kind()
if kind is Parameter.VAR_POSITIONAL:
# In case there were not already kwargs, the next param can
# always be a normal argument.
return False
if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY):
must_be_kwarg = False
break
if not must_be_kwarg:
break
return must_be_kwarg
def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
comp_dct = set()
if settings.case_insensitive_completion:
like_name = like_name.lower()
for name in completion_names:
string = name.string_name
if settings.case_insensitive_completion:
string = string.lower()
if helpers.match(string, like_name, fuzzy=fuzzy):
new = classes.Completion(
inference_state,
name,
stack,
len(like_name),
is_fuzzy=fuzzy,
cached_name=cached_name,
)
k = (new.name, new.complete) # key
if k not in comp_dct:
comp_dct.add(k)
tree_name = name.tree_name
if tree_name is not None:
definition = tree_name.get_definition()
if definition is not None and definition.type == 'del_stmt':
continue
yield new
def _remove_duplicates(completions, other_completions):
names = {d.name for d in other_completions}
return [c for c in completions if c.name not in names]
def get_user_context(module_context, position):
"""
Returns the scope in which the user resides. This includes flows.
"""
leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True)
return module_context.create_context(leaf)
def get_flow_scope_node(module_node, position):
node = module_node.get_leaf_for_position(position, include_prefixes=True)
while not isinstance(node, (tree.Scope, tree.Flow)):
node = node.parent
return node
@plugin_manager.decorate()
def complete_param_names(context, function_name, decorator_nodes):
# Basically there's no way to do param completion. The plugins are
# responsible for this.
return []
class Completion:
def __init__(self, inference_state, module_context, code_lines, position,
signatures_callback, fuzzy=False):
self._inference_state = inference_state
self._module_context = module_context
self._module_node = module_context.tree_node
self._code_lines = code_lines
# The first step of completions is to get the name
self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
# The actual cursor position is not what we need to calculate
# everything. We want the start of the name we're on.
self._original_position = position
self._signatures_callback = signatures_callback
self._fuzzy = fuzzy
def complete(self):
leaf = self._module_node.get_leaf_for_position(
self._original_position,
include_prefixes=True
)
string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)
prefixed_completions = complete_dict(
self._module_context,
self._code_lines,
start_leaf or leaf,
self._original_position,
None if string is None else quote + string,
fuzzy=self._fuzzy,
)
if string is not None and not prefixed_completions:
prefixed_completions = list(complete_file_name(
self._inference_state, self._module_context, start_leaf, quote, string,
self._like_name, self._signatures_callback,
self._code_lines, self._original_position,
self._fuzzy
))
if string is not None:
if not prefixed_completions and '\n' in string:
# Complete only multi line strings
prefixed_completions = self._complete_in_string(start_leaf, string)
return prefixed_completions
cached_name, completion_names = self._complete_python(leaf)
completions = list(filter_names(self._inference_state, completion_names,
self.stack, self._like_name,
self._fuzzy, cached_name=cached_name))
return (
# Removing duplicates mostly to remove False/True/None duplicates.
_remove_duplicates(prefixed_completions, completions)
+ sorted(completions, key=lambda x: (x.name.startswith('__'),
x.name.startswith('_'),
x.name.lower()))
)
def _complete_python(self, leaf):
"""
Analyzes the current context of a completion and decides what to
return.
Technically this works by generating a parser stack and analysing the
current stack for possible grammar nodes.
Possible enhancements:
- global/nonlocal search global
- yield from / raise from <- could be only exceptions/generators
- In args: */**: no completion
- In params (also lambda): no completion before =
"""
grammar = self._inference_state.grammar
self.stack = stack = None
self._position = (
self._original_position[0],
self._original_position[1] - len(self._like_name)
)
cached_name = None
try:
self.stack = stack = helpers.get_stack_at_position(
grammar, self._code_lines, leaf, self._position
)
except helpers.OnErrorLeaf as e:
value = e.error_leaf.value
if value == '.':
# After ErrorLeaf's that are dots, we will not do any
# completions since this probably just confuses the user.
return cached_name, []
# If we don't have a value, just use global completion.
return cached_name, self._complete_global_scope()
allowed_transitions = \
list(stack._allowed_transition_names_and_token_types())
if 'if' in allowed_transitions:
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
previous_leaf = leaf.get_previous_leaf()
indent = self._position[1]
if not (leaf.start_pos <= self._position <= leaf.end_pos):
indent = leaf.start_pos[1]
if previous_leaf is not None:
stmt = previous_leaf
while True:
stmt = search_ancestor(
stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
'error_node',
)
if stmt is None:
break
type_ = stmt.type
if type_ == 'error_node':
first = stmt.children[0]
if isinstance(first, Leaf):
type_ = first.value + '_stmt'
# Compare indents
if stmt.start_pos[1] == indent:
if type_ == 'if_stmt':
allowed_transitions += ['elif', 'else']
elif type_ == 'try_stmt':
allowed_transitions += ['except', 'finally', 'else']
elif type_ == 'for_stmt':
allowed_transitions.append('else')
completion_names = []
kwargs_only = False
if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
PythonTokenTypes.INDENT)):
# This means that we actually have to do type inference.
nonterminals = [stack_node.nonterminal for stack_node in stack]
nodes = _gather_nodes(stack)
if nodes and nodes[-1] in ('as', 'def', 'class'):
# No completions for ``with x as foo`` and ``import x as foo``.
# Also true for defining names as a class or function.
return cached_name, list(self._complete_inherited(is_function=True))
elif "import_stmt" in nonterminals:
level, names = parse_dotted_names(nodes, "import_from" in nonterminals)
only_modules = not ("import_from" in nonterminals and 'import' in nodes)
completion_names += self._get_importer_names(
names,
level,
only_modules=only_modules,
)
elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
dot = self._module_node.get_leaf_for_position(self._position)
cached_name, n = self._complete_trailer(dot.get_previous_leaf())
completion_names += n
elif self._is_parameter_completion():
completion_names += self._complete_params(leaf)
else:
# Apparently this looks like it's good enough to filter most cases
# so that signature completions don't randomly appear.
# To understand why this works, three things are important:
# 1. trailer with a `,` in it is either a subscript or an arglist.
# 2. If there's no `,`, it's at the start and only signatures start
# with `(`. Other trailers could start with `.` or `[`.
# 3. Decorators are very primitive and have an optional `(` with
# optional arglist in them.
if nodes[-1] in ['(', ','] \
and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
signatures = self._signatures_callback(*self._position)
if signatures:
call_details = signatures[0]._call_details
used_kwargs = list(call_details.iter_used_keyword_arguments())
positional_count = call_details.count_positional_arguments()
completion_names += _get_signature_param_names(
signatures,
positional_count,
used_kwargs,
)
kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)
if not kwargs_only:
completion_names += self._complete_global_scope()
completion_names += self._complete_inherited(is_function=False)
if not kwargs_only:
current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
completion_names += self._complete_keywords(
allowed_transitions,
only_values=not (not current_line or current_line[-1] in ' \t.;'
and current_line[-3:] != '...')
)
return cached_name, completion_names
def _is_parameter_completion(self):
tos = self.stack[-1]
if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
# We are at the position `lambda `, where basically the next node
# is a param.
return True
if tos.nonterminal == 'parameters':
# Basically we are at the position `foo(`, there's nothing there
# yet, so we have no `typedargslist`.
return True
# var args is for lambdas and typed args for normal functions
return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','
def _complete_params(self, leaf):
stack_node = self.stack[-2]
if stack_node.nonterminal == 'parameters':
stack_node = self.stack[-3]
if stack_node.nonterminal == 'funcdef':
context = get_user_context(self._module_context, self._position)
node = search_ancestor(leaf, 'error_node', 'funcdef')
if node is not None:
if node.type == 'error_node':
n = node.children[0]
if n.type == 'decorators':
decorators = n.children
elif n.type == 'decorator':
decorators = [n]
else:
decorators = []
else:
decorators = node.get_decorators()
function_name = stack_node.nodes[1]
return complete_param_names(context, function_name.value, decorators)
return []
def _complete_keywords(self, allowed_transitions, only_values):
for k in allowed_transitions:
if isinstance(k, str) and k.isalpha():
if not only_values or k in ('True', 'False', 'None'):
yield keywords.KeywordName(self._inference_state, k)
def _complete_global_scope(self):
context = get_user_context(self._module_context, self._position)
debug.dbg('global completion scope: %s', context)
flow_scope_node = get_flow_scope_node(self._module_node, self._position)
filters = get_global_filters(
context,
self._position,
flow_scope_node
)
completion_names = []
for filter in filters:
completion_names += filter.values()
return completion_names
def _complete_trailer(self, previous_leaf):
inferred_context = self._module_context.create_context(previous_leaf)
values = infer_call_of_leaf(inferred_context, previous_leaf)
debug.dbg('trailer completion values: %s', values, color='MAGENTA')
# The cached name simply exists to make speed optimizations for certain
# modules.
cached_name = None
if len(values) == 1:
v, = values
if v.is_module():
if len(v.string_names) == 1:
module_name = v.string_names[0]
if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
cached_name = module_name
return cached_name, self._complete_trailer_for_values(values)
def _complete_trailer_for_values(self, values):
user_context = get_user_context(self._module_context, self._position)
return complete_trailer(user_context, values)
def _get_importer_names(self, names, level=0, only_modules=True):
names = [n.value for n in names]
i = imports.Importer(self._inference_state, names, self._module_context, level)
return i.completion_names(self._inference_state, only_modules=only_modules)
def _complete_inherited(self, is_function=True):
"""
Autocomplete inherited methods when overriding in child class.
"""
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
cls = tree.search_ancestor(leaf, 'classdef')
if cls is None:
return
# Complete the methods that are defined in the super classes.
class_value = self._module_context.create_value(cls)
if cls.start_pos[1] >= leaf.start_pos[1]:
return
filters = class_value.get_filters(is_instance=True)
# The first dict is the dictionary of class itself.
next(filters)
for filter in filters:
for name in filter.values():
# TODO we should probably check here for properties
if (name.api_type == 'function') == is_function:
yield name
def _complete_in_string(self, start_leaf, string):
"""
To make it possible for people to have completions in doctests or
generally in "Python" code in docstrings, we use the following
heuristic:
- Having an indented block of code
- Having some doctest code that starts with `>>>`
- Having backticks that doesn't have whitespace inside it
"""
def iter_relevant_lines(lines):
include_next_line = False
for l in code_lines:
if include_next_line or l.startswith('>>>') or l.startswith(' '):
yield re.sub(r'^( *>>> ?| +)', '', l)
else:
yield None
include_next_line = bool(re.match(' *>>>', l))
string = dedent(string)
code_lines = split_lines(string, keepends=True)
relevant_code_lines = list(iter_relevant_lines(code_lines))
if relevant_code_lines[-1] is not None:
# Some code lines might be None, therefore get rid of that.
relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines]
return self._complete_code_lines(relevant_code_lines)
match = re.search(r'`([^`\s]+)', code_lines[-1])
if match:
return self._complete_code_lines([match.group(1)])
return []
def _complete_code_lines(self, code_lines):
module_node = self._inference_state.grammar.parse(''.join(code_lines))
module_value = ModuleValue(
self._inference_state,
module_node,
code_lines=code_lines,
)
module_value.parent_context = self._module_context
return Completion(
self._inference_state,
module_value.as_context(),
code_lines=code_lines,
position=module_node.end_pos,
signatures_callback=lambda *args, **kwargs: [],
fuzzy=self._fuzzy
).complete()
def _gather_nodes(stack):
nodes = []
for stack_node in stack:
if stack_node.dfa.from_rule == 'small_stmt':
nodes = []
else:
nodes += stack_node.nodes
return nodes
_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
def _extract_string_while_in_string(leaf, position):
def return_part_of_leaf(leaf):
kwargs = {}
if leaf.line == position[0]:
kwargs['endpos'] = position[1] - leaf.column
match = _string_start.match(leaf.value, **kwargs)
if not match:
return None, None, None
start = match.group(0)
if leaf.line == position[0] and position[1] < leaf.column + match.end():
return None, None, None
return cut_value_at_position(leaf, position)[match.end():], leaf, start
if position < leaf.start_pos:
return None, None, None
if leaf.type == 'string':
return return_part_of_leaf(leaf)
leaves = []
while leaf is not None:
if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
if len(leaf.value) > 1:
return return_part_of_leaf(leaf)
prefix_leaf = None
if not leaf.prefix:
prefix_leaf = leaf.get_previous_leaf()
if prefix_leaf is None or prefix_leaf.type != 'name' \
or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
prefix_leaf = None
return (
''.join(cut_value_at_position(l, position) for l in leaves),
prefix_leaf or leaf,
('' if prefix_leaf is None else prefix_leaf.value)
+ cut_value_at_position(leaf, position),
)
if leaf.line != position[0]:
# Multi line strings are always simple error leaves and contain the
# whole string; single line error leaves are therefore the important
# ones now, and since the line is different, it's not really a single
# line string anymore.
break
leaves.insert(0, leaf)
leaf = leaf.get_previous_leaf()
return None, None, None
def complete_trailer(user_context, values):
completion_names = []
for value in values:
for filter in value.get_filters(origin_scope=user_context.tree_node):
completion_names += filter.values()
if not value.is_stub() and isinstance(value, TreeInstance):
completion_names += _complete_getattr(user_context, value)
python_values = convert_values(values)
for c in python_values:
if c not in values:
for filter in c.get_filters(origin_scope=user_context.tree_node):
completion_names += filter.values()
return completion_names
def _complete_getattr(user_context, instance):
"""
A heuristic to make completion for proxy objects work. This is not
intended to work in all cases. It works exactly in this case:
def __getattr__(self, name):
...
return getattr(any_object, name)
It is important that the return contains getattr directly, otherwise it
won't work anymore. It's really just a stupid heuristic. It will not
work if you write e.g. `return (getattr(o, name))`, because of the
additional parentheses. It will also not work if you move the getattr
to some other place that is not the return statement itself.
It is intentional that it doesn't work in all cases. Generally it's
really hard to do even this case (as you can see below). Most people
will write it like this anyway and the other ones, well they are just
out of luck I guess :) ~dave.
"""
names = (instance.get_function_slot_names(u'__getattr__')
or instance.get_function_slot_names(u'__getattribute__'))
functions = ValueSet.from_sets(
name.infer()
for name in names
)
for func in functions:
tree_node = func.tree_node
if tree_node is None or tree_node.type != 'funcdef':
continue
for return_stmt in tree_node.iter_return_stmts():
# Basically until the next comment we just try to find out if a
# return statement looks exactly like `return getattr(x, name)`.
if return_stmt.type != 'return_stmt':
continue
atom_expr = return_stmt.children[1]
if atom_expr.type != 'atom_expr':
continue
atom = atom_expr.children[0]
trailer = atom_expr.children[1]
if len(atom_expr.children) != 2 or atom.type != 'name' \
or atom.value != 'getattr':
continue
arglist = trailer.children[1]
if arglist.type != 'arglist' or len(arglist.children) < 3:
continue
context = func.as_context()
object_node = arglist.children[0]
# Make sure it's a param: foo in __getattr__(self, foo)
name_node = arglist.children[2]
name_list = context.goto(name_node, name_node.start_pos)
if not any(n.api_type == 'param' for n in name_list):
continue
# Now that we know that these are most probably completion
# objects, we just infer the object and return them as
# completions.
objects = context.infer_node(object_node)
return complete_trailer(user_context, objects)
return []
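# Illustrative sketch (hypothetical helper, not used by jedi itself): the exact
# proxy pattern the heuristic above recognizes. Completing `Proxy().du` should
# typically offer `dump`/`dumps`, because the return statement is a plain
# `getattr(...)` call whose second argument is the `__getattr__` parameter.
def _example_getattr_proxy_completion():
    import jedi  # assumes jedi is importable in the analysis environment
    source = (
        'import json\n'
        'class Proxy:\n'
        '    def __getattr__(self, name):\n'
        '        return getattr(json, name)\n'
        'Proxy().du'
    )
    return [c.name for c in jedi.Script(source).complete()]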
def search_in_module(inference_state, module_context, names, wanted_names,
wanted_type, complete=False, fuzzy=False,
ignore_imports=False, convert=False):
for s in wanted_names[:-1]:
new_names = []
for n in names:
if s == n.string_name:
if n.tree_name is not None and n.api_type == 'module' \
and ignore_imports:
continue
new_names += complete_trailer(
module_context,
n.infer()
)
debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
names = new_names
last_name = wanted_names[-1].lower()
for n in names:
string = n.string_name.lower()
if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
or not complete and string == last_name:
if isinstance(n, SubModuleName):
names = [v.name for v in n.infer()]
else:
names = [n]
if convert:
names = convert_names(names)
for n2 in names:
if complete:
def_ = classes.Completion(
inference_state, n2,
stack=None,
like_name_length=len(last_name),
is_fuzzy=fuzzy,
)
else:
def_ = classes.Name(inference_state, n2)
if not wanted_type or wanted_type == def_.type:
yield def_

View file

@@ -0,0 +1,25 @@
_cache = {}
def save_entry(module_name, name, cache):
try:
module_cache = _cache[module_name]
except KeyError:
module_cache = _cache[module_name] = {}
module_cache[name] = cache
def _create_get_from_cache(number):
def _get_from_cache(module_name, name, get_cache_values):
try:
return _cache[module_name][name][number]
except KeyError:
v = get_cache_values()
save_entry(module_name, name, v)
return v[number]
return _get_from_cache
get_type = _create_get_from_cache(0)
get_docstring_signature = _create_get_from_cache(1)
get_docstring = _create_get_from_cache(2)
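# Illustrative usage sketch (hypothetical values, not part of the cache API):
# each accessor reads one slot of the cached (type, signature, docstring)
# triple; the triple is computed and stored only on the first miss.
def _example_cache_usage():
    def compute():
        return ('function', 'sqrt(x)', 'Return the square root of x.')
    assert get_type('math', 'sqrt', compute) == 'function'
    assert get_docstring('math', 'sqrt', compute) == 'Return the square root of x.'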

View file

@@ -0,0 +1,489 @@
"""
Environments are a way to activate different Python versions or Virtualenvs for
static analysis. The Python binary in that environment is going to be executed.
"""
import os
import sys
import hashlib
import filecmp
from collections import namedtuple
from jedi._compatibility import highest_pickle_protocol, which
from jedi.cache import memoize_method, time_cache
from jedi.inference.compiled.subprocess import CompiledSubprocess, \
InferenceStateSameProcess, InferenceStateSubprocess
import parso
_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
_SUPPORTED_PYTHONS = ['3.8', '3.7', '3.6', '3.5', '2.7']
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
_CONDA_VAR = 'CONDA_PREFIX'
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
class InvalidPythonEnvironment(Exception):
"""
If you see this exception, the Python executable or Virtualenv you have
been trying to use is probably not a correct Python version.
"""
class _BaseEnvironment(object):
@memoize_method
def get_grammar(self):
version_string = '%s.%s' % (self.version_info.major, self.version_info.minor)
return parso.load_grammar(version=version_string)
@property
def _sha256(self):
try:
return self._hash
except AttributeError:
self._hash = _calculate_sha256_for_file(self.executable)
return self._hash
def _get_info():
return (
sys.executable,
sys.prefix,
sys.version_info[:3],
)
class Environment(_BaseEnvironment):
"""
This class is supposed to be created by internal Jedi architecture. You
should not create it directly. Please use create_environment or the other
functions instead. It is then returned by that function.
"""
_subprocess = None
def __init__(self, executable, env_vars=None):
self._start_executable = executable
self._env_vars = env_vars
# Initialize the environment
self._get_subprocess()
def _get_subprocess(self):
if self._subprocess is not None and not self._subprocess.is_crashed:
return self._subprocess
try:
self._subprocess = CompiledSubprocess(self._start_executable,
env_vars=self._env_vars)
info = self._subprocess._send(None, _get_info)
except Exception as exc:
raise InvalidPythonEnvironment(
"Could not get version information for %r: %r" % (
self._start_executable,
exc))
# Since it could change and might not be the same(?) as the one given,
# set it here.
self.executable = info[0]
"""
The Python executable, matches ``sys.executable``.
"""
self.path = info[1]
"""
The path to an environment, matches ``sys.prefix``.
"""
self.version_info = _VersionInfo(*info[2])
"""
Like :data:`sys.version_info`: a tuple to show the current
Environment's Python version.
"""
# py2 sends bytes via pickle apparently?!
if self.version_info.major == 2:
self.executable = self.executable.decode()
self.path = self.path.decode()
# Adjust pickle protocol according to host and client version.
self._subprocess._pickle_protocol = highest_pickle_protocol([
sys.version_info, self.version_info])
return self._subprocess
def __repr__(self):
version = '.'.join(str(i) for i in self.version_info)
return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)
def get_inference_state_subprocess(self, inference_state):
return InferenceStateSubprocess(inference_state, self._get_subprocess())
@memoize_method
def get_sys_path(self):
"""
The sys path for this environment. Does not include potential
modifications from e.g. appending to :data:`sys.path`.
:returns: list of str
"""
# It's pretty much impossible to generate the sys path without actually
# executing Python. The sys path (when starting with -S) itself depends
# on how the Python version was compiled (ENV variables).
# If you omit -S when starting Python (normal case), additionally
# site.py gets executed.
return self._get_subprocess().get_sys_path()
class _SameEnvironmentMixin(object):
def __init__(self):
self._start_executable = self.executable = sys.executable
self.path = sys.prefix
self.version_info = _VersionInfo(*sys.version_info[:3])
self._env_vars = None
class SameEnvironment(_SameEnvironmentMixin, Environment):
pass
class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
def get_inference_state_subprocess(self, inference_state):
return InferenceStateSameProcess(inference_state)
def get_sys_path(self):
return sys.path
def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'):
"""Get virtualenv environment from VIRTUAL_ENV environment variable.
It uses `safe=False` with ``create_environment``, because the environment
variable is considered to be safe / solely controlled by the user.
"""
var = os.environ.get(env_var)
if var:
# Under macOS in some cases - notably when using Pipenv - the
# sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
# /path/to/env so we need to fully resolve the paths in order to
# compare them.
if os.path.realpath(var) == os.path.realpath(sys.prefix):
return _try_get_same_env()
try:
return create_environment(var, safe=False)
except InvalidPythonEnvironment:
pass
def _calculate_sha256_for_file(path):
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(filecmp.BUFSIZE), b''):
sha256.update(block)
return sha256.hexdigest()
def get_default_environment():
"""
Tries to return an active Virtualenv or conda environment.
If neither the VIRTUAL_ENV nor the CONDA_PREFIX variable is set,
it will return the latest Python version installed on the system. This
makes it possible to use as many new Python features as possible when using
autocompletion and other functionality.
:returns: :class:`.Environment`
"""
virtual_env = _get_virtual_env_from_var()
if virtual_env is not None:
return virtual_env
conda_env = _get_virtual_env_from_var(_CONDA_VAR)
if conda_env is not None:
return conda_env
return _try_get_same_env()
def _try_get_same_env():
env = SameEnvironment()
if not os.path.basename(env.executable).lower().startswith('python'):
# This tries to counter issues with embedding. In some cases (e.g.
# VIM's Python on Mac/Windows), sys.executable is /foo/bar/vim. This
# happens, because for Mac a function called `_NSGetExecutablePath` is
# used and for Windows `GetModuleFileNameW`. These are both platform
# specific functions. For all other systems sys.executable should be
# alright. However here we try to generalize:
#
# 1. Check if the executable looks like python (heuristic)
# 2. In case it's not try to find the executable
# 3. In case we don't find it use an interpreter environment.
#
# The last option will always work, but leads to potential crashes of
# Jedi - which is ok, because it happens very rarely and even less,
# because the code below should work for most cases.
if os.name == 'nt':
# The first case would be a virtualenv and the second a normal
# Python installation.
checks = (r'Scripts\python.exe', 'python.exe')
else:
# For unix it looks like Python is always in a bin folder.
checks = (
'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]),
'bin/python%s' % (sys.version_info[0]),
'bin/python',
)
for check in checks:
guess = os.path.join(sys.exec_prefix, check)
if os.path.isfile(guess):
# Bingo - We think we have our Python.
return Environment(guess)
# It looks like there is no reasonable Python to be found.
return InterpreterEnvironment()
# If no virtualenv is found, use the environment we're already
# using.
return env
def get_cached_default_environment():
var = os.environ.get('VIRTUAL_ENV') or os.environ.get(_CONDA_VAR)
environment = _get_cached_default_environment()
# Under macOS in some cases - notably when using Pipenv - the
# sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
# /path/to/env so we need to fully resolve the paths in order to
# compare them.
if var and os.path.realpath(var) != os.path.realpath(environment.path):
_get_cached_default_environment.clear_cache()
return _get_cached_default_environment()
return environment
@time_cache(seconds=10 * 60) # 10 Minutes
def _get_cached_default_environment():
try:
return get_default_environment()
except InvalidPythonEnvironment:
# It's possible that `sys.executable` is wrong. Typically happens
# when Jedi is used in an executable that embeds Python. For further
# information, have a look at:
# https://github.com/davidhalter/jedi/issues/1531
return InterpreterEnvironment()
def find_virtualenvs(paths=None, **kwargs):
"""
:param paths: A list of paths in your file system to be scanned for
Virtualenvs. It will search in these paths and potentially execute the
Python binaries.
:param safe: Default True. In case this is False, it will allow this
function to execute potential `python` environments. An attacker might
be able to drop an executable in a path this function is searching by
default. If the executable has not been installed by root, it will not
be executed.
:param use_environment_vars: Default True. If True, the VIRTUAL_ENV
variable will be checked if it contains a valid VirtualEnv.
CONDA_PREFIX will be checked to see if it contains a valid conda
environment.
:yields: :class:`.Environment`
"""
def py27_comp(paths=None, safe=True, use_environment_vars=True):
if paths is None:
paths = []
_used_paths = set()
if use_environment_vars:
# Using this variable should be safe, because attackers might be
# able to drop files (via git) but not environment variables.
virtual_env = _get_virtual_env_from_var()
if virtual_env is not None:
yield virtual_env
_used_paths.add(virtual_env.path)
conda_env = _get_virtual_env_from_var(_CONDA_VAR)
if conda_env is not None:
yield conda_env
_used_paths.add(conda_env.path)
for directory in paths:
if not os.path.isdir(directory):
continue
directory = os.path.abspath(directory)
for path in os.listdir(directory):
path = os.path.join(directory, path)
if path in _used_paths:
# A path shouldn't be inferred twice.
continue
_used_paths.add(path)
try:
executable = _get_executable_path(path, safe=safe)
yield Environment(executable)
except InvalidPythonEnvironment:
pass
return py27_comp(paths, **kwargs)
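# Illustrative sketch (hypothetical workspace paths): walk the configured
# folders plus any activated VIRTUAL_ENV/CONDA_PREFIX environment and print
# what was found. Each yielded value is an Environment as defined above.
def _example_discover_environments():
    for env in find_virtualenvs(paths=['envs', '.venvs'], safe=True):
        print(env.executable, env.version_info)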
def find_system_environments(**kwargs):
"""
Ignores virtualenvs and returns the Python versions that were installed on
your system. This might return nothing, if you're running Python e.g. from
a portable version.
The environments are sorted from latest to oldest Python version.
:yields: :class:`.Environment`
"""
for version_string in _SUPPORTED_PYTHONS:
try:
yield get_system_environment(version_string, **kwargs)
except InvalidPythonEnvironment:
pass
# TODO: this function should probably return a list of environments since
# multiple Python installations can be found on a system for the same version.
def get_system_environment(version, **kwargs):
"""
Return the first Python environment found for a string of the form 'X.Y'
where X and Y are the major and minor versions of Python.
:raises: :exc:`.InvalidPythonEnvironment`
:returns: :class:`.Environment`
"""
exe = which('python' + version)
if exe:
if exe == sys.executable:
return SameEnvironment()
return Environment(exe)
if os.name == 'nt':
for exe in _get_executables_from_windows_registry(version):
try:
return Environment(exe, **kwargs)
except InvalidPythonEnvironment:
pass
raise InvalidPythonEnvironment("Cannot find executable python%s." % version)
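# Illustrative sketch (the version string is an assumption): ask for a specific
# interpreter and fall back to the default environment if it is not installed.
def _example_pick_python_version(version='3.8'):
    try:
        return get_system_environment(version)
    except InvalidPythonEnvironment:
        return get_default_environment()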
def create_environment(path, safe=True, **kwargs):
"""
Make it possible to manually create an Environment object by specifying a
Virtualenv path or an executable path and optional environment variables.
:raises: :exc:`.InvalidPythonEnvironment`
:returns: :class:`.Environment`
TODO: make env_vars a kwarg when Python 2 is dropped. For now, preserve API
"""
return _create_environment(path, safe, **kwargs)
def _create_environment(path, safe=True, env_vars=None):
if os.path.isfile(path):
_assert_safe(path, safe)
return Environment(path, env_vars=env_vars)
return Environment(_get_executable_path(path, safe=safe), env_vars=env_vars)
def _get_executable_path(path, safe=True):
"""
Returns None if it's not actually a virtual env.
"""
if os.name == 'nt':
python = os.path.join(path, 'Scripts', 'python.exe')
else:
python = os.path.join(path, 'bin', 'python')
if not os.path.exists(python):
raise InvalidPythonEnvironment("%s seems to be missing." % python)
_assert_safe(python, safe)
return python
def _get_executables_from_windows_registry(version):
# The winreg module is named _winreg on Python 2.
try:
import winreg
except ImportError:
import _winreg as winreg
# TODO: support Python Anaconda.
sub_keys = [
r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
]
for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
for sub_key in sub_keys:
sub_key = sub_key.format(version=version)
try:
with winreg.OpenKey(root_key, sub_key) as key:
prefix = winreg.QueryValueEx(key, '')[0]
exe = os.path.join(prefix, 'python.exe')
if os.path.isfile(exe):
yield exe
except WindowsError:
pass
def _assert_safe(executable_path, safe):
if safe and not _is_safe(executable_path):
raise InvalidPythonEnvironment(
"The python binary is potentially unsafe.")
def _is_safe(executable_path):
# Resolve sym links. A venv typically is a symlink to a known Python
# binary. Only virtualenvs copy symlinks around.
real_path = os.path.realpath(executable_path)
if _is_unix_safe_simple(real_path):
return True
# Just check the list of known Python versions. If it's not in there,
# it's likely an attacker or some Python that was not properly
# installed in the system.
for environment in find_system_environments():
if environment.executable == real_path:
return True
# If the versions don't match, just compare the binary files. If we
# don't do that, only venvs will be working and not virtualenvs.
# venvs are symlinks while virtualenvs are actual copies of the
# Python files.
# This still means that if the system Python is updated and the
# virtualenv's Python is not (which is probably never going to get
# upgraded), it will not work with Jedi. IMO that's fine, because
# people should just be using venv. ~ dave
if environment._sha256 == _calculate_sha256_for_file(real_path):
return True
return False
def _is_unix_safe_simple(real_path):
if _is_unix_admin():
# In case we are root, just be conservative and
# only execute known paths.
return any(real_path.startswith(p) for p in _SAFE_PATHS)
uid = os.stat(real_path).st_uid
# The interpreter needs to be owned by root. This means that it wasn't
# written by a user and therefore attacking Jedi is not as simple.
# The attack could look like the following:
# 1. A user clones a repository.
# 2. The repository has an innocent looking folder called foobar. jedi
# searches for the folder and executes foobar/bin/python --version if
# there's also a foobar/bin/activate.
# 3. The attacker has gained code execution, since he controls
# foobar/bin/python.
return uid == 0
def _is_unix_admin():
try:
return os.getuid() == 0
except AttributeError:
return False # Windows

View file

@@ -0,0 +1,46 @@
"""
This file is about errors in Python files and not about exception handling in
Jedi.
"""
def parso_to_jedi_errors(grammar, module_node):
return [SyntaxError(e) for e in grammar.iter_errors(module_node)]
class SyntaxError(object):
"""
Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
"""
def __init__(self, parso_error):
self._parso_error = parso_error
@property
def line(self):
"""The line where the error starts (starting with 1)."""
return self._parso_error.start_pos[0]
@property
def column(self):
"""The column where the error starts (starting with 0)."""
return self._parso_error.start_pos[1]
@property
def until_line(self):
"""The line where the error ends (starting with 1)."""
return self._parso_error.end_pos[0]
@property
def until_column(self):
"""The column where the error ends (starting with 0)."""
return self._parso_error.end_pos[1]
def get_message(self):
return self._parso_error.message
def __repr__(self):
return '<%s from=%s to=%s>' % (
self.__class__.__name__,
self._parso_error.start_pos,
self._parso_error.end_pos,
)
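# Illustrative sketch (hypothetical helper): Script.get_syntax_errors() returns
# the SyntaxError objects defined above for broken code.
def _example_syntax_errors():
    import jedi  # assumes the public jedi.Script API of this release
    script = jedi.Script('def foo(:\n    pass\n')
    return [(e.line, e.column, e.get_message()) for e in script.get_syntax_errors()]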

View file

@@ -0,0 +1,31 @@
class _JediError(Exception):
pass
class InternalError(_JediError):
"""
This error might happen when a subprocess crashes. The reason is usually
broken C code in a third party library. This is not very common and it is
safe to use Jedi again. However, the same calls might result in the same
error again.
"""
class WrongVersion(_JediError):
"""
This error is reserved for the future; it shouldn't really happen at the
moment.
"""
class RefactoringError(_JediError):
"""
Refactorings can fail for various reasons. So if you work with refactorings
like :meth:`.Script.rename`, :meth:`.Script.inline`,
:meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make
sure to catch these. The descriptions in the errors are usually valuable
for end users.
A typical ``RefactoringError`` would tell the user that inlining is not
possible if no name is under the cursor.
"""

View file

@@ -0,0 +1,156 @@
import os
from jedi._compatibility import FileNotFoundError, force_unicode, scandir
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = u'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += force_unicode(s)
first = False
return string
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
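# Illustrative sketch (hypothetical helper): completing inside an unterminated
# string literal falls through to complete_file_name(); the results depend on
# the files present in the project path.
def _example_file_name_completion():
    import jedi  # assumes jedi is importable in the analysis environment
    return [c.name for c in jedi.Script("open('./").complete()]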

View file

@@ -0,0 +1,522 @@
"""
Helpers for the API
"""
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from parso.python.parser import Parser
from parso.python import tree
from jedi._compatibility import u, Parameter
from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
def _start_match(string, like_name):
return string.startswith(like_name)
def _fuzzy_match(string, like_name):
if len(like_name) <= 1:
return like_name in string
pos = string.find(like_name[0])
if pos >= 0:
return _fuzzy_match(string[pos + 1:], like_name[1:])
return False
def match(string, like_name, fuzzy=False):
if fuzzy:
return _fuzzy_match(string, like_name)
else:
return _start_match(string, like_name)
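# Illustrative sketch (hypothetical helper): prefix matching vs. fuzzy
# (in-order subsequence) matching as used for completion filtering.
def _example_match_modes():
    assert match('isinstance', 'isi')
    assert match('isinstance', 'instc', fuzzy=True)
    assert not match('isinstance', 'xyz', fuzzy=True)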
def sorted_definitions(defs):
# Note: the `or ''` and `or 0` fallbacks below are required because
# `module_path`, `line` and `column` can all be None.
return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0, x.name))
def get_on_completion_name(module_node, lines, position):
leaf = module_node.get_leaf_for_position(position)
if leaf is None or leaf.type in ('string', 'error_leaf'):
# Completions inside strings are a bit special, we need to parse the
# string. The same is true for comments and error_leafs.
line = lines[position[0] - 1]
# The first step of completions is to get the name
return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
elif leaf.type not in ('name', 'keyword'):
return ''
return leaf.value[:position[1] - leaf.start_pos[1]]
def _get_code(code_lines, start_pos, end_pos):
# Get relevant lines.
lines = code_lines[start_pos[0] - 1:end_pos[0]]
# Remove the parts at the end of the line.
lines[-1] = lines[-1][:end_pos[1]]
# Remove first line indentation.
lines[0] = lines[0][start_pos[1]:]
return ''.join(lines)
class OnErrorLeaf(Exception):
@property
def error_leaf(self):
return self.args[0]
def _get_code_for_stack(code_lines, leaf, position):
# It might happen that we're on whitespace or on a comment. This means
# that we would not get the right leaf.
if leaf.start_pos >= position:
# If we're not on a comment simply get the previous leaf and proceed.
leaf = leaf.get_previous_leaf()
if leaf is None:
return u('') # At the beginning of the file.
is_after_newline = leaf.type == 'newline'
while leaf.type == 'newline':
leaf = leaf.get_previous_leaf()
if leaf is None:
return u('')
if leaf.type == 'error_leaf' or leaf.type == 'string':
if leaf.start_pos[0] < position[0]:
# On a different line, we just begin anew.
return u('')
# Error leafs cannot be parsed, completion in strings is also
# impossible.
raise OnErrorLeaf(leaf)
else:
user_stmt = leaf
while True:
if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
break
user_stmt = user_stmt.parent
if is_after_newline:
if user_stmt.start_pos[1] > position[1]:
# This means that it's actually a dedent and that means that we
# start without value (part of a suite).
return u('')
# This is basically getting the relevant lines.
return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
def get_stack_at_position(grammar, code_lines, leaf, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
class EndMarkerReached(Exception):
pass
def tokenize_without_endmarker(code):
# TODO For now this is not an official parso API; it exists purely
# for Jedi.
tokens = grammar._tokenize(code)
for token in tokens:
if token.string == safeword:
raise EndMarkerReached()
elif token.prefix.endswith(safeword):
# This happens with comments.
raise EndMarkerReached()
elif token.string.endswith(safeword):
yield token # Probably an f-string literal that was not finished.
raise EndMarkerReached()
else:
yield token
# The code might be indented, just remove the indentation.
code = dedent(_get_code_for_stack(code_lines, leaf, pos))
# We use a word to tell Jedi when we have reached the start of the
# completion.
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + ' ' + safeword
p = Parser(grammar._pgen_grammar, error_recovery=True)
try:
p.parse(tokens=tokenize_without_endmarker(code))
except EndMarkerReached:
return p.stack
raise SystemError(
"This really shouldn't happen. There's a bug in Jedi:\n%s"
% list(tokenize_without_endmarker(code))
)
def infer(inference_state, context, leaf):
if leaf.type == 'name':
return inference_state.infer(context, leaf)
parent = leaf.parent
definitions = NO_VALUES
if parent.type == 'atom':
# e.g. `(a + b)`
definitions = context.infer_node(leaf.parent)
elif parent.type == 'trailer':
# e.g. `a()`
definitions = infer_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
# e.g. `"foo"` or `1.0`
return infer_atom(context, leaf)
elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
return get_string_value_set(inference_state)
return definitions
def filter_follow_imports(names, follow_builtin_imports=False):
for name in names:
if name.is_import():
new_names = list(filter_follow_imports(
name.goto(),
follow_builtin_imports=follow_builtin_imports,
))
found_builtin = False
if follow_builtin_imports:
for new_name in new_names:
if new_name.start_pos is None:
found_builtin = True
if found_builtin:
yield name
else:
for new_name in new_names:
yield new_name
else:
yield name
class CallDetails(object):
def __init__(self, bracket_leaf, children, position):
# Exposes bracket_leaf plus the `index` and `keyword_name_str` properties below.
self.bracket_leaf = bracket_leaf
self._children = children
self._position = position
@property
def index(self):
return _get_index_and_key(self._children, self._position)[0]
@property
def keyword_name_str(self):
return _get_index_and_key(self._children, self._position)[1]
@memoize_method
def _list_arguments(self):
return list(_iter_arguments(self._children, self._position))
def calculate_index(self, param_names):
positional_count = 0
used_names = set()
star_count = -1
args = self._list_arguments()
if not args:
if param_names:
return 0
else:
return None
is_kwarg = False
for i, (star_count, key_start, had_equal) in enumerate(args):
is_kwarg |= had_equal | (star_count == 2)
if star_count:
pass # For now do nothing, we don't know what's in there here.
else:
if i + 1 != len(args): # Not last
if had_equal:
used_names.add(key_start)
else:
positional_count += 1
for i, param_name in enumerate(param_names):
kind = param_name.get_kind()
if not is_kwarg:
if kind == Parameter.VAR_POSITIONAL:
return i
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
if i == positional_count:
return i
if key_start is not None and not star_count == 1 or star_count == 2:
if param_name.string_name not in used_names \
and (kind == Parameter.KEYWORD_ONLY
or kind == Parameter.POSITIONAL_OR_KEYWORD
and positional_count <= i):
if star_count:
return i
if had_equal:
if param_name.string_name == key_start:
return i
else:
if param_name.string_name.startswith(key_start):
return i
if kind == Parameter.VAR_KEYWORD:
return i
return None
def iter_used_keyword_arguments(self):
for star_count, key_start, had_equal in list(self._list_arguments()):
if had_equal and key_start:
yield key_start
def count_positional_arguments(self):
count = 0
for star_count, key_start, had_equal in self._list_arguments()[:-1]:
if star_count:
break
count += 1
return count
def _iter_arguments(nodes, position):
def remove_after_pos(name):
if name.type != 'name':
return None
return name.value[:position[1] - name.start_pos[1]]
# Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
for x in _iter_arguments(nodes_before[-1].children, position):
yield x # Python 2 :(
return
previous_node_yielded = False
stars_seen = 0
for i, node in enumerate(nodes_before):
if node.type == 'argument':
previous_node_yielded = True
first = node.children[0]
second = node.children[1]
if second == '=':
if second.start_pos < position:
yield 0, first.value, True
else:
yield 0, remove_after_pos(first), False
elif first in ('*', '**'):
yield len(first.value), remove_after_pos(second), False
else:
# Must be a Comprehension
first_leaf = node.get_first_leaf()
if first_leaf.type == 'name' and first_leaf.start_pos >= position:
yield 0, remove_after_pos(first_leaf), False
else:
yield 0, None, False
stars_seen = 0
elif node.type in ('testlist', 'testlist_star_expr'): # testlist is Python 2
for n in node.children[::2]:
if n.type == 'star_expr':
stars_seen = 1
n = n.children[1]
yield stars_seen, remove_after_pos(n), False
stars_seen = 0
# The count of children is even if there's a comma at the end.
previous_node_yielded = bool(len(node.children) % 2)
elif isinstance(node, tree.PythonLeaf) and node.value == ',':
if not previous_node_yielded:
yield stars_seen, '', False
stars_seen = 0
previous_node_yielded = False
elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
stars_seen = len(node.value)
elif node == '=' and nodes_before[-1]:
previous_node_yielded = True
before = nodes_before[i - 1]
if before.type == 'name':
yield 0, before.value, True
else:
yield 0, None, False
# Just ignore the star that is probably a syntax error.
stars_seen = 0
if not previous_node_yielded:
if nodes_before[-1].type == 'name':
yield stars_seen, remove_after_pos(nodes_before[-1]), False
else:
yield stars_seen, '', False
def _get_index_and_key(nodes, position):
"""
Returns the amount of commas and the keyword argument string.
"""
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
return _get_index_and_key(nodes_before[-1].children, position)
key_str = None
last = nodes_before[-1]
if last.type == 'argument' and last.children[1] == '=' \
and last.children[1].end_pos <= position:
# The last argument is a keyword argument whose `=` is already before the cursor.
key_str = last.children[0].value
elif last == '=':
key_str = nodes_before[-2].value
return nodes_before.count(','), key_str
def _get_signature_details_from_error_node(node, additional_children, position):
for index, element in reversed(list(enumerate(node.children))):
# `index > 0` means that it's a trailer and not an atom.
if element == '(' and element.end_pos <= position and index > 0:
# It's an error node, we don't want to match too much, just
# until the parentheses is enough.
children = node.children[index:]
name = element.get_previous_leaf()
if name is None:
continue
if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
return CallDetails(element, children + additional_children, position)
def get_signature_details(module, position):
leaf = module.get_leaf_for_position(position, include_prefixes=True)
# It's easier to deal with the previous token than the next one in this
# case.
if leaf.start_pos >= position:
# Whitespace / comments after the leaf count towards the previous leaf.
leaf = leaf.get_previous_leaf()
if leaf is None:
return None
# Now that we know where we are in the syntax tree, we start to look at
# parents for possible function definitions.
node = leaf.parent
while node is not None:
if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
# Don't show signatures if there's stuff before it that just
# makes it feel strange to have a signature.
return None
additional_children = []
for n in reversed(node.children):
if n.start_pos < position:
if n.type == 'error_node':
result = _get_signature_details_from_error_node(
n, additional_children, position
)
if result is not None:
return result
additional_children[0:0] = n.children
continue
additional_children.insert(0, n)
# Find a valid trailer
if node.type == 'trailer' and node.children[0] == '(' \
or node.type == 'decorator' and node.children[2] == '(':
# Additionally we have to check that an ending parenthesis isn't
# interpreted wrong. There are two cases:
# 1. Cursor before paren -> The current signature is good
# 2. Cursor after paren -> We need to skip the current signature
if not (leaf is node.children[-1] and position >= leaf.end_pos):
leaf = node.get_previous_leaf()
if leaf is None:
return None
return CallDetails(
node.children[0] if node.type == 'trailer' else node.children[2],
node.children,
position
)
node = node.parent
return None
@signature_time_cache("call_signatures_validity")
def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos):
"""This function calculates the cache key."""
line_index = user_pos[0] - 1
before_cursor = code_lines[line_index][:user_pos[1]]
other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
whole = ''.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = context.get_root_context().py__file__()
if module_path is None:
yield None # Don't cache!
else:
yield (module_path, before_bracket, bracket_leaf.start_pos)
yield infer(
inference_state,
context,
bracket_leaf.get_previous_leaf(),
)
def validate_line_column(func):
@wraps(func)
def wrapper(self, line=None, column=None, *args, **kwargs):
line = max(len(self._code_lines), 1) if line is None else line
if not (0 < line <= len(self._code_lines)):
raise ValueError('`line` parameter is not in a valid range.')
line_string = self._code_lines[line - 1]
line_len = len(line_string)
if line_string.endswith('\r\n'):
line_len -= 2
elif line_string.endswith('\n'):
line_len -= 1
column = line_len if column is None else column
if not (0 <= column <= line_len):
raise ValueError('`column` parameter (%d) is not in a valid range '
'(0-%d) for line %d (%r).' % (
column, line_len, line, line_string))
return func(self, line, column, *args, **kwargs)
return wrapper
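# Illustrative sketch (hypothetical host class; only `_code_lines` is needed by
# the wrapper): omitted line/column default to the end of the code, and values
# outside the valid range raise ValueError.
def _example_validate_line_column():
    class _Host(object):
        _code_lines = ['x = 1\n']

        @validate_line_column
        def where(self, line, column):
            return line, column

    assert _Host().where() == (1, 5)
    try:
        _Host().where(line=2)
    except ValueError:
        return True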
def get_module_names(module, all_scopes, definitions=True, references=False):
"""
Returns the definition and/or reference name leaves found in the module,
optionally restricted to module-level scopes.
"""
def def_ref_filter(name):
is_def = name.is_definition()
return definitions and is_def or references and not is_def
names = list(chain.from_iterable(module.get_used_names().values()))
if not all_scopes:
# We have to filter all the names that don't have the module as a
# parent_scope. There's None as a parent, because nodes in the module
# node have the parent module and not suite as all the others.
# Therefore it's important to catch that case.
def is_module_scope_name(name):
parent_scope = get_parent_scope(name)
# async functions have an extra wrapper. Strip it.
if parent_scope and parent_scope.type == 'async_stmt':
parent_scope = parent_scope.parent
return parent_scope in (module, None)
names = [n for n in names if is_module_scope_name(n)]
return filter(def_ref_filter, names)
def split_search_string(name):
type, _, dotted_names = name.rpartition(' ')
if type == 'def':
type = 'function'
return type, dotted_names.split('.')
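# Illustrative sketch (hypothetical helper): the optional first word selects a
# type filter and `def` is normalized to `function`.
def _example_split_search_string():
    assert split_search_string('def os.path.join') == ('function', ['os', 'path', 'join'])
    assert split_search_string('os.path') == ('', ['os', 'path'])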

View file

@@ -0,0 +1,75 @@
"""
TODO Some parts of this module are still not well documented.
"""
from jedi.inference import compiled
from jedi.inference.base_value import ValueSet
from jedi.inference.filters import ParserTreeFilter, MergedFilter
from jedi.inference.names import TreeNameDefinition
from jedi.inference.compiled import mixed
from jedi.inference.compiled.access import create_access_path
from jedi.inference.context import ModuleContext
def _create(inference_state, obj):
return compiled.create_from_access_path(
inference_state, create_access_path(inference_state, obj)
)
class NamespaceObject(object):
def __init__(self, dct):
self.__dict__ = dct
class MixedTreeName(TreeNameDefinition):
def infer(self):
"""
In IPython notebook it is typical that some parts of the code that is
provided was already executed. In that case if something is not properly
inferred, it should still infer from the variables it already knows.
"""
inferred = super(MixedTreeName, self).infer()
if not inferred:
for compiled_value in self.parent_context.mixed_values:
for f in compiled_value.get_filters():
values = ValueSet.from_sets(
n.infer() for n in f.get(self.string_name)
)
if values:
return values
return inferred
class MixedParserTreeFilter(ParserTreeFilter):
name_class = MixedTreeName
class MixedModuleContext(ModuleContext):
def __init__(self, tree_module_value, namespaces):
super(MixedModuleContext, self).__init__(tree_module_value)
self.mixed_values = [
self._get_mixed_object(
_create(self.inference_state, NamespaceObject(n))
) for n in namespaces
]
def _get_mixed_object(self, compiled_value):
return mixed.MixedObject(
compiled_value=compiled_value,
tree_value=self._value
)
def get_filters(self, until_position=None, origin_scope=None):
yield MergedFilter(
MixedParserTreeFilter(
parent_context=self,
until_position=until_position,
origin_scope=origin_scope
),
self.get_global_filter(),
)
for mixed_object in self.mixed_values:
for filter in mixed_object.get_filters(until_position, origin_scope):
yield filter

View file

@@ -0,0 +1,57 @@
import pydoc
from jedi.inference.utils import ignored
from jedi.inference.names import AbstractArbitraryName
try:
from pydoc_data import topics as pydoc_topics
except ImportError:
# Python 2
try:
import pydoc_topics
except ImportError:
        # This is for the Python 3 embeddable version, which doesn't ship the
        # pydoc_data module in its python3x.zip file.
pydoc_topics = None
class KeywordName(AbstractArbitraryName):
api_type = u'keyword'
def py__doc__(self):
return imitate_pydoc(self.string_name)
def imitate_pydoc(string):
"""
It's not possible to get the pydoc's without starting the annoying pager
stuff.
"""
if pydoc_topics is None:
return ''
# str needed because of possible unicode stuff in py2k (pydoc doesn't work
# with unicode strings)
string = str(string)
h = pydoc.help
with ignored(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')
def get_target(s):
return h.topics.get(s, h.keywords.get(s))
while isinstance(string, str):
string = get_target(string)
try:
# is a tuple now
label, related = string
except TypeError:
return ''
try:
return pydoc_topics.topics[label].strip() if pydoc_topics else ''
except KeyError:
return ''

View file

@@ -0,0 +1,427 @@
"""
Projects are a way to handle Python projects within Jedi. For simpler plugins
you might not want to deal with projects, but if you want to give the user more
flexibility to define sys paths and Python interpreters for a project,
:class:`.Project` is the perfect way to allow for that.
Projects can be saved to disk and loaded again, to allow project definitions to
be used across repositories.
"""
import os
import errno
import json
import sys
from jedi._compatibility import FileNotFoundError, PermissionError, \
IsADirectoryError, NotADirectoryError
from jedi import debug
from jedi.api.environment import get_cached_default_environment, create_environment
from jedi.api.exceptions import WrongVersion
from jedi.api.completion import search_in_module
from jedi.api.helpers import split_search_string, get_module_names
from jedi._compatibility import force_unicode
from jedi.inference.imports import load_module_from_path, \
load_namespace_from_path, iter_module_names
from jedi.inference.sys_path import discover_buildout_paths
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
from jedi.file_io import FolderIO
from jedi.common import traverse_parents
_CONFIG_FOLDER = '.jedi'
_CONTAINS_POTENTIAL_PROJECT = \
'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
_SERIALIZER_VERSION = 1
def _try_to_skip_duplicates(func):
def wrapper(*args, **kwargs):
found_tree_nodes = []
found_modules = []
for definition in func(*args, **kwargs):
tree_node = definition._name.tree_name
if tree_node is not None and tree_node in found_tree_nodes:
continue
if definition.type == 'module' and definition.module_path is not None:
if definition.module_path in found_modules:
continue
found_modules.append(definition.module_path)
yield definition
found_tree_nodes.append(tree_node)
return wrapper
def _remove_duplicates_from_path(path):
used = set()
for p in path:
if p in used:
continue
used.add(p)
yield p
def _force_unicode_list(lst):
return list(map(force_unicode, lst))
class Project(object):
"""
Projects are a simple way to manage Python folders and define how Jedi does
import resolution. It is mostly used as a parameter to :class:`.Script`.
Additionally there are functions to search a whole project.
"""
_environment = None
@staticmethod
def _get_config_folder_path(base_path):
return os.path.join(base_path, _CONFIG_FOLDER)
@staticmethod
def _get_json_path(base_path):
return os.path.join(Project._get_config_folder_path(base_path), 'project.json')
@classmethod
def load(cls, path):
"""
Loads a project from a specific path. You should not provide the path
to ``.jedi/project.json``, but rather the path to the project folder.
:param path: The path of the directory you want to use as a project.
"""
with open(cls._get_json_path(path)) as f:
version, data = json.load(f)
if version == 1:
return cls(**data)
else:
raise WrongVersion(
"The Jedi version of this project seems newer than what we can handle."
)
def save(self):
"""
        Saves the project configuration in the project directory as ``.jedi/project.json``.
"""
data = dict(self.__dict__)
data.pop('_environment', None)
data.pop('_django', None) # TODO make django setting public?
data = {k.lstrip('_'): v for k, v in data.items()}
# TODO when dropping Python 2 use pathlib.Path.mkdir(parents=True, exist_ok=True)
try:
os.makedirs(self._get_config_folder_path(self._path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(self._get_json_path(self._path), 'w') as f:
return json.dump((_SERIALIZER_VERSION, data), f)
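    # Illustrative sketch (paths are hypothetical): persist a configuration and
    # load it again later, passing the project folder (not project.json itself):
    #
    #     project = Project('/path/to/repo', environment_path='/usr/bin/python3')
    #     project.save()                       # writes /path/to/repo/.jedi/project.json
    #     same = Project.load('/path/to/repo')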
def __init__(self, path, **kwargs):
"""
:param path: The base path for this project.
:param environment_path: The Python executable path, typically the path
of a virtual environment.
        :param load_unsafe_extensions: Default False. Loads extensions that are
            not in the sys path and in the local directories. With this option
            enabled, this is potentially unsafe if you clone a git repository
            and analyze its code, because those compiled extensions will be
            imported and therefore have execution privileges.
        :param sys_path: list of str. You can override the sys path if you
            want. By default the ``sys.path`` is generated by the
            environment (virtualenvs, etc.).
:param added_sys_path: list of str. Adds these paths at the end of the
sys path.
:param smart_sys_path: If this is enabled (default), adds paths from
local directories. Otherwise you will have to rely on your packages
being properly configured on the ``sys.path``.
"""
def py2_comp(path, environment_path=None, load_unsafe_extensions=False,
sys_path=None, added_sys_path=(), smart_sys_path=True):
self._path = os.path.abspath(path)
self._environment_path = environment_path
self._sys_path = sys_path
self._smart_sys_path = smart_sys_path
self._load_unsafe_extensions = load_unsafe_extensions
self._django = False
self.added_sys_path = list(added_sys_path)
"""The sys path that is going to be added at the end of the """
py2_comp(path, **kwargs)
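    # A minimal sketch of the accepted keyword arguments (all values below are
    # hypothetical):
    #
    #     Project(
    #         '/path/to/repo',
    #         environment_path='/path/to/venv/bin/python',
    #         load_unsafe_extensions=False,
    #         sys_path=None,                  # fall back to the environment's sys.path
    #         added_sys_path=['/path/to/plugins'],
    #         smart_sys_path=True,
    #     )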
@property
def path(self):
"""
The base path for this project.
"""
return self._path
@inference_state_as_method_param_cache()
def _get_base_sys_path(self, inference_state):
# The sys path has not been set explicitly.
sys_path = list(inference_state.environment.get_sys_path())
try:
sys_path.remove('')
except ValueError:
pass
return sys_path
@inference_state_as_method_param_cache()
def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
"""
Keep this method private for all users of jedi. However internally this
one is used like a public method.
"""
suffixed = list(self.added_sys_path)
prefixed = []
if self._sys_path is None:
sys_path = list(self._get_base_sys_path(inference_state))
else:
sys_path = list(self._sys_path)
if self._smart_sys_path:
prefixed.append(self._path)
if inference_state.script_path is not None:
suffixed += discover_buildout_paths(inference_state, inference_state.script_path)
if add_parent_paths:
# Collect directories in upward search by:
# 1. Skipping directories with __init__.py
# 2. Stopping immediately when above self._path
traversed = []
for parent_path in traverse_parents(inference_state.script_path):
if parent_path == self._path or not parent_path.startswith(self._path):
break
if not add_init_paths \
and os.path.isfile(os.path.join(parent_path, "__init__.py")):
continue
traversed.append(parent_path)
                    # AFAIK some libraries have imports like `foo.foo.bar`, which
                    # leads to the conclusion that by default we should prefer
                    # longer paths rather than shorter ones.
suffixed += reversed(traversed)
if self._django:
prefixed.append(self._path)
path = prefixed + sys_path + suffixed
return list(_force_unicode_list(_remove_duplicates_from_path(path)))
def get_environment(self):
if self._environment is None:
if self._environment_path is not None:
self._environment = create_environment(self._environment_path, safe=False)
else:
self._environment = get_cached_default_environment()
return self._environment
def search(self, string, **kwargs):
"""
        Searches for a name in the whole project. If the project is very big,
        at some point Jedi will stop searching. However, it is also very much
        recommended not to exhaust the generator. Just display the first ten
        results to the user.
There are currently three different search patterns:
- ``foo`` to search for a definition foo in any file or a file called
``foo.py`` or ``foo.pyi``.
- ``foo.bar`` to search for the ``foo`` and then an attribute ``bar``
in it.
- ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific
API type.
        :param bool all_scopes: Default False; searches not only for
            definitions at the top level of a module, but also in
            functions and classes.
:yields: :class:`.Name`
"""
return self._search(string, **kwargs)
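    # Illustrative sketch (project path and names are hypothetical); the
    # attribute names assume the usual jedi Name API:
    #
    #     project = get_default_project('/path/to/repo')
    #     for definition in project.search('def foo.bar.baz'):
    #         print(definition.module_path, definition.line)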
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. An empty string
lists all definitions in a project, so be careful with that.
        :param bool all_scopes: Default False; searches not only for
            definitions at the top level of a module, but also in
            functions and classes.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
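    # Illustrative sketch (the search string is hypothetical): complete a
    # partial name across the whole project.
    #
    #     for completion in project.complete_search('conf'):
    #         print(completion.name)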
def _search(self, string, all_scopes=False): # Python 2..
return self._search_func(string, all_scopes=all_scopes)
@_try_to_skip_duplicates
def _search_func(self, string, complete=False, all_scopes=False):
        # Using a Script is the easiest way to get an empty module context.
from jedi import Script
s = Script('', project=self)
inference_state = s._inference_state
empty_module_context = s._get_module_context()
if inference_state.grammar.version_info < (3, 6) or sys.version_info < (3, 6):
raise NotImplementedError(
"No support for refactorings/search on Python 2/3.5"
)
debug.dbg('Search for string %s, complete=%s', string, complete)
wanted_type, wanted_names = split_search_string(string)
name = wanted_names[0]
stub_folder_name = name + '-stubs'
ios = recurse_find_python_folders_and_files(FolderIO(self._path))
file_ios = []
# 1. Search for modules in the current project
for folder_io, file_io in ios:
if file_io is None:
file_name = folder_io.get_base_name()
if file_name == name or file_name == stub_folder_name:
f = folder_io.get_file_io('__init__.py')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
f = folder_io.get_file_io('__init__.pyi')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
m = load_namespace_from_path(inference_state, folder_io).as_context()
else:
continue
else:
file_ios.append(file_io)
file_name = os.path.basename(file_io.path)
if file_name in (name + '.py', name + '.pyi'):
m = load_module_from_path(inference_state, file_io).as_context()
else:
continue
debug.dbg('Search of a specific module %s', m)
for x in search_in_module(
inference_state,
m,
names=[m.name],
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
ignore_imports=True,
):
yield x # Python 2...
# 2. Search for identifiers in the project.
for module_context in search_in_file_ios(inference_state, file_ios, name):
names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
names = [module_context.create_name(n) for n in names]
names = _remove_imports(names)
for x in search_in_module(
inference_state,
module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
ignore_imports=True,
):
yield x # Python 2...
# 3. Search for modules on sys.path
sys_path = [
p for p in self._get_sys_path(inference_state)
# Exclude folders that are handled by recursing of the Python
# folders.
if not p.startswith(self._path)
]
names = list(iter_module_names(inference_state, empty_module_context, sys_path))
for x in search_in_module(
inference_state,
empty_module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
):
yield x # Python 2...
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._path)
def _is_potential_project(path):
for name in _CONTAINS_POTENTIAL_PROJECT:
if os.path.exists(os.path.join(path, name)):
return True
return False
def _is_django_path(directory):
""" Detects the path of the very well known Django library (if used) """
try:
with open(os.path.join(directory, 'manage.py'), 'rb') as f:
return b"DJANGO_SETTINGS_MODULE" in f.read()
except (FileNotFoundError, IsADirectoryError, PermissionError):
return False
def get_default_project(path=None):
"""
If a project is not defined by the user, Jedi tries to define a project by
itself as well as possible. Jedi traverses folders until it finds one of
the following:
    1. A ``.jedi/project.json`` configuration file
    2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
       ``requirements.txt``, ``MANIFEST.in`` or ``pyproject.toml``.
"""
if path is None:
path = os.getcwd()
check = os.path.realpath(path)
probable_path = None
first_no_init_file = None
for dir in traverse_parents(check, include_current=True):
try:
return Project.load(dir)
except (FileNotFoundError, IsADirectoryError, PermissionError):
pass
except NotADirectoryError:
continue
if first_no_init_file is None:
if os.path.exists(os.path.join(dir, '__init__.py')):
                # If an __init__.py exists, this is in 99% of cases just a
                # Python package and the project sits at least one level above.
continue
else:
first_no_init_file = dir
if _is_django_path(dir):
project = Project(dir)
project._django = True
return project
if probable_path is None and _is_potential_project(dir):
probable_path = dir
if probable_path is not None:
# TODO search for setup.py etc
return Project(probable_path)
if first_no_init_file is not None:
return Project(first_no_init_file)
curdir = path if os.path.isdir(path) else os.path.dirname(path)
return Project(curdir)
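# Illustrative sketch (paths are hypothetical): starting from a file deep
# inside a repository, the surrounding project is detected by walking up the
# folder hierarchy.
#
#     project = get_default_project('/path/to/repo/pkg/module.py')
#     project.path  # -> '/path/to/repo' if e.g. setup.py or .git lives there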
def _remove_imports(names):
return [
n for n in names
if n.tree_name is None or n.api_type != 'module'
]

View file

@@ -0,0 +1,249 @@
from os.path import dirname, basename, join, relpath
import os
import re
import difflib
from parso import split_lines
from jedi.api.exceptions import RefactoringError
EXPRESSION_PARTS = (
'or_test and_test not_test comparison '
'expr xor_expr and_expr shift_expr arith_expr term factor power atom_expr'
).split()
class ChangedFile(object):
def __init__(self, inference_state, from_path, to_path,
module_node, node_to_str_map):
self._inference_state = inference_state
self._from_path = from_path
self._to_path = to_path
self._module_node = module_node
self._node_to_str_map = node_to_str_map
def get_diff(self):
old_lines = split_lines(self._module_node.get_code(), keepends=True)
new_lines = split_lines(self.get_new_code(), keepends=True)
# Add a newline at the end if it's missing. Otherwise the diff will be
# very weird. A `diff -u file1 file2` would show the string:
#
# \ No newline at end of file
#
# This is not necessary IMO, because Jedi does not really play with
# newlines and the ending newline does not really matter in Python
# files. ~dave
if old_lines[-1] != '':
old_lines[-1] += '\n'
if new_lines[-1] != '':
new_lines[-1] += '\n'
project_path = self._inference_state.project.path
if self._from_path is None:
from_p = ''
else:
from_p = relpath(self._from_path, project_path)
if self._to_path is None:
to_p = ''
else:
to_p = relpath(self._to_path, project_path)
diff = difflib.unified_diff(
old_lines, new_lines,
fromfile=from_p,
tofile=to_p,
)
# Apparently there's a space at the end of the diff - for whatever
# reason.
return ''.join(diff).rstrip(' ')
def get_new_code(self):
return self._inference_state.grammar.refactor(self._module_node, self._node_to_str_map)
def apply(self):
if self._from_path is None:
raise RefactoringError(
'Cannot apply a refactoring on a Script with path=None'
)
with open(self._from_path, 'w', newline='') as f:
f.write(self.get_new_code())
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._from_path)
class Refactoring(object):
def __init__(self, inference_state, file_to_node_changes, renames=()):
self._inference_state = inference_state
self._renames = renames
self._file_to_node_changes = file_to_node_changes
def get_changed_files(self):
"""
Returns a path to ``ChangedFile`` map.
"""
def calculate_to_path(p):
if p is None:
return p
for from_, to in renames:
if p.startswith(from_):
p = to + p[len(from_):]
return p
renames = self.get_renames()
return {
path: ChangedFile(
self._inference_state,
from_path=path,
to_path=calculate_to_path(path),
module_node=next(iter(map_)).get_root_node(),
node_to_str_map=map_
) for path, map_ in sorted(self._file_to_node_changes.items())
}
def get_renames(self):
"""
Files can be renamed in a refactoring.
Returns ``Iterable[Tuple[str, str]]``.
"""
return sorted(self._renames)
def get_diff(self):
text = ''
project_path = self._inference_state.project.path
for from_, to in self.get_renames():
text += 'rename from %s\nrename to %s\n' \
% (relpath(from_, project_path), relpath(to, project_path))
return text + ''.join(f.get_diff() for f in self.get_changed_files().values())
def apply(self):
"""
Applies the whole refactoring to the files, which includes renames.
"""
for f in self.get_changed_files().values():
f.apply()
for old, new in self.get_renames():
os.rename(old, new)
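# Illustrative sketch of the typical flow (the `refactoring` object below would
# come from e.g. rename() further down in this module):
#
#     print(refactoring.get_diff())            # preview as a unified diff
#     for path, changed_file in refactoring.get_changed_files().items():
#         print(path, changed_file.get_new_code())
#     refactoring.apply()                      # write the files and do renames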
def _calculate_rename(path, new_name):
name = basename(path)
dir_ = dirname(path)
if name in ('__init__.py', '__init__.pyi'):
parent_dir = dirname(dir_)
return dir_, join(parent_dir, new_name)
ending = re.search(r'\.pyi?$', name).group(0)
return path, join(dir_, new_name + ending)
def rename(inference_state, definitions, new_name):
file_renames = set()
file_tree_name_map = {}
if not definitions:
raise RefactoringError("There is no name under the cursor")
for d in definitions:
tree_name = d._name.tree_name
if d.type == 'module' and tree_name is None:
file_renames.add(_calculate_rename(d.module_path, new_name))
else:
# This private access is ok in a way. It's not public to
# protect Jedi users from seeing it.
if tree_name is not None:
fmap = file_tree_name_map.setdefault(d.module_path, {})
fmap[tree_name] = tree_name.prefix + new_name
return Refactoring(inference_state, file_tree_name_map, file_renames)
def inline(inference_state, names):
if not names:
raise RefactoringError("There is no name under the cursor")
if any(n.api_type == 'module' for n in names):
raise RefactoringError("Cannot inline imports or modules")
if any(n.tree_name is None for n in names):
raise RefactoringError("Cannot inline builtins/extensions")
definitions = [n for n in names if n.tree_name.is_definition()]
if len(definitions) == 0:
raise RefactoringError("No definition found to inline")
if len(definitions) > 1:
raise RefactoringError("Cannot inline a name with multiple definitions")
if len(names) == 1:
raise RefactoringError("There are no references to this name")
tree_name = definitions[0].tree_name
expr_stmt = tree_name.get_definition()
if expr_stmt.type != 'expr_stmt':
type_ = dict(
funcdef='function',
classdef='class',
).get(expr_stmt.type, expr_stmt.type)
raise RefactoringError("Cannot inline a %s" % type_)
if len(expr_stmt.get_defined_names(include_setitem=True)) > 1:
raise RefactoringError("Cannot inline a statement with multiple definitions")
first_child = expr_stmt.children[1]
if first_child.type == 'annassign' and len(first_child.children) == 4:
first_child = first_child.children[2]
if first_child != '=':
if first_child.type == 'annassign':
raise RefactoringError(
'Cannot inline a statement that is defined by an annotation'
)
else:
raise RefactoringError(
'Cannot inline a statement with "%s"'
% first_child.get_code(include_prefix=False)
)
rhs = expr_stmt.get_rhs()
replace_code = rhs.get_code(include_prefix=False)
references = [n for n in names if not n.tree_name.is_definition()]
file_to_node_changes = {}
for name in references:
tree_name = name.tree_name
path = name.get_root_context().py__file__()
s = replace_code
if rhs.type == 'testlist_star_expr' \
or tree_name.parent.type in EXPRESSION_PARTS \
or tree_name.parent.type == 'trailer' \
and tree_name.parent.get_next_sibling() is not None:
s = '(' + replace_code + ')'
of_path = file_to_node_changes.setdefault(path, {})
n = tree_name
prefix = n.prefix
par = n.parent
if par.type == 'trailer' and par.children[0] == '.':
prefix = par.parent.children[0].prefix
n = par
for some_node in par.parent.children[:par.parent.children.index(par)]:
of_path[some_node] = ''
of_path[n] = prefix + s
path = definitions[0].get_root_context().py__file__()
changes = file_to_node_changes.setdefault(path, {})
changes[expr_stmt] = _remove_indent_of_prefix(expr_stmt.get_first_leaf().prefix)
next_leaf = expr_stmt.get_next_leaf()
# Most of the time we have to remove the newline at the end of the
# statement, but if there's a comment we might not need to.
if next_leaf.prefix.strip(' \t') == '' \
and (next_leaf.type == 'newline' or next_leaf == ';'):
changes[next_leaf] = ''
return Refactoring(inference_state, file_to_node_changes)
def _remove_indent_of_prefix(prefix):
r"""
Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
"""
return ''.join(split_lines(prefix, keepends=True)[:-1])

View file

@@ -0,0 +1,387 @@
from textwrap import dedent
from parso import split_lines
from jedi import debug
from jedi.api.exceptions import RefactoringError
from jedi.api.refactoring import Refactoring, EXPRESSION_PARTS
from jedi.common import indent_block
from jedi.parser_utils import function_is_classmethod, function_is_staticmethod
_DEFINITION_SCOPES = ('suite', 'file_input')
_VARIABLE_EXCTRACTABLE = EXPRESSION_PARTS + \
('atom testlist_star_expr testlist test lambdef lambdef_nocond '
'keyword name number string fstring').split()
def extract_variable(inference_state, path, module_node, name, pos, until_pos):
nodes = _find_nodes(module_node, pos, until_pos)
debug.dbg('Extracting nodes: %s', nodes)
is_expression, message = _is_expression_with_error(nodes)
if not is_expression:
raise RefactoringError(message)
generated_code = name + ' = ' + _expression_nodes_to_string(nodes)
file_to_node_changes = {path: _replace(nodes, name, generated_code, pos)}
return Refactoring(inference_state, file_to_node_changes)
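# Conceptually (the code below is hypothetical): extracting `a + b` with the
# name `x` rewrites
#
#     result = foo(a + b)
#
# into
#
#     x = a + b
#     result = foo(x)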
def _is_expression_with_error(nodes):
"""
Returns a tuple (is_expression, error_string).
"""
if any(node.type == 'name' and node.is_definition() for node in nodes):
return False, 'Cannot extract a name that defines something'
if nodes[0].type not in _VARIABLE_EXCTRACTABLE:
return False, 'Cannot extract a "%s"' % nodes[0].type
return True, ''
def _find_nodes(module_node, pos, until_pos):
"""
Looks up a module and tries to find the appropriate amount of nodes that
are in there.
"""
start_node = module_node.get_leaf_for_position(pos, include_prefixes=True)
if until_pos is None:
if start_node.type == 'operator':
next_leaf = start_node.get_next_leaf()
if next_leaf is not None and next_leaf.start_pos == pos:
start_node = next_leaf
if _is_not_extractable_syntax(start_node):
start_node = start_node.parent
if start_node.parent.type == 'trailer':
start_node = start_node.parent.parent
while start_node.parent.type in EXPRESSION_PARTS:
start_node = start_node.parent
nodes = [start_node]
else:
# Get the next leaf if we are at the end of a leaf
if start_node.end_pos == pos:
next_leaf = start_node.get_next_leaf()
if next_leaf is not None:
start_node = next_leaf
        # Some syntax is not extractable, just use its parent
if _is_not_extractable_syntax(start_node):
start_node = start_node.parent
# Find the end
end_leaf = module_node.get_leaf_for_position(until_pos, include_prefixes=True)
if end_leaf.start_pos > until_pos:
end_leaf = end_leaf.get_previous_leaf()
if end_leaf is None:
raise RefactoringError('Cannot extract anything from that')
parent_node = start_node
while parent_node.end_pos < end_leaf.end_pos:
parent_node = parent_node.parent
nodes = _remove_unwanted_expression_nodes(parent_node, pos, until_pos)
# If the user marks just a return statement, we return the expression
# instead of the whole statement, because the user obviously wants to
# extract that part.
if len(nodes) == 1 and start_node.type in ('return_stmt', 'yield_expr'):
return [nodes[0].children[1]]
return nodes
def _replace(nodes, expression_replacement, extracted, pos,
insert_before_leaf=None, remaining_prefix=None):
# Now try to replace the nodes found with a variable and move the code
# before the current statement.
definition = _get_parent_definition(nodes[0])
if insert_before_leaf is None:
insert_before_leaf = definition.get_first_leaf()
first_node_leaf = nodes[0].get_first_leaf()
lines = split_lines(insert_before_leaf.prefix, keepends=True)
if first_node_leaf is insert_before_leaf:
if remaining_prefix is not None:
# The remaining prefix has already been calculated.
lines[:-1] = remaining_prefix
lines[-1:-1] = [indent_block(extracted, lines[-1]) + '\n']
extracted_prefix = ''.join(lines)
replacement_dct = {}
if first_node_leaf is insert_before_leaf:
replacement_dct[nodes[0]] = extracted_prefix + expression_replacement
else:
if remaining_prefix is None:
p = first_node_leaf.prefix
else:
p = remaining_prefix + _get_indentation(nodes[0])
replacement_dct[nodes[0]] = p + expression_replacement
replacement_dct[insert_before_leaf] = extracted_prefix + insert_before_leaf.value
for node in nodes[1:]:
replacement_dct[node] = ''
return replacement_dct
def _expression_nodes_to_string(nodes):
return ''.join(n.get_code(include_prefix=i != 0) for i, n in enumerate(nodes))
def _suite_nodes_to_string(nodes, pos):
n = nodes[0]
prefix, part_of_code = _split_prefix_at(n.get_first_leaf(), pos[0] - 1)
code = part_of_code + n.get_code(include_prefix=False) \
+ ''.join(n.get_code() for n in nodes[1:])
return prefix, code
def _split_prefix_at(leaf, until_line):
"""
Returns a tuple of the leaf's prefix, split at the until_line
position.
"""
# second means the second returned part
second_line_count = leaf.start_pos[0] - until_line
lines = split_lines(leaf.prefix, keepends=True)
return ''.join(lines[:-second_line_count]), ''.join(lines[-second_line_count:])
def _get_indentation(node):
return split_lines(node.get_first_leaf().prefix)[-1]
def _get_parent_definition(node):
"""
Returns the statement where a node is defined.
"""
while node is not None:
if node.parent.type in _DEFINITION_SCOPES:
return node
node = node.parent
raise NotImplementedError('We should never even get here')
def _remove_unwanted_expression_nodes(parent_node, pos, until_pos):
"""
    This function makes it possible to extract `2 + 3` from `1 * 2 + 3`, even
    though `2 + 3` is not a node of the parse tree on its own.
"""
typ = parent_node.type
is_suite_part = typ in ('suite', 'file_input')
if typ in EXPRESSION_PARTS or is_suite_part:
nodes = parent_node.children
for i, n in enumerate(nodes):
if n.end_pos > pos:
start_index = i
if n.type == 'operator':
start_index -= 1
break
for i, n in reversed(list(enumerate(nodes))):
if n.start_pos < until_pos:
end_index = i
if n.type == 'operator':
end_index += 1
# Something like `not foo or bar` should not be cut after not
for n2 in nodes[i:]:
if _is_not_extractable_syntax(n2):
end_index += 1
else:
break
break
nodes = nodes[start_index:end_index + 1]
if not is_suite_part:
nodes[0:1] = _remove_unwanted_expression_nodes(nodes[0], pos, until_pos)
nodes[-1:] = _remove_unwanted_expression_nodes(nodes[-1], pos, until_pos)
return nodes
return [parent_node]
def _is_not_extractable_syntax(node):
return node.type == 'operator' \
or node.type == 'keyword' and node.value not in ('None', 'True', 'False')
def extract_function(inference_state, path, module_context, name, pos, until_pos):
nodes = _find_nodes(module_context.tree_node, pos, until_pos)
assert len(nodes)
is_expression, _ = _is_expression_with_error(nodes)
context = module_context.create_context(nodes[0])
is_bound_method = context.is_bound_method()
params, return_variables = list(_find_inputs_and_outputs(module_context, context, nodes))
# Find variables
# Is a class method / method
if context.is_module():
insert_before_leaf = None # Leaf will be determined later
else:
node = _get_code_insertion_node(context.tree_node, is_bound_method)
insert_before_leaf = node.get_first_leaf()
if is_expression:
code_block = 'return ' + _expression_nodes_to_string(nodes) + '\n'
remaining_prefix = None
has_ending_return_stmt = False
else:
has_ending_return_stmt = _is_node_ending_return_stmt(nodes[-1])
if not has_ending_return_stmt:
# Find the actually used variables (of the defined ones). If none are
# used (e.g. if the range covers the whole function), return the last
# defined variable.
return_variables = list(_find_needed_output_variables(
context,
nodes[0].parent,
nodes[-1].end_pos,
return_variables
)) or [return_variables[-1]] if return_variables else []
remaining_prefix, code_block = _suite_nodes_to_string(nodes, pos)
after_leaf = nodes[-1].get_next_leaf()
first, second = _split_prefix_at(after_leaf, until_pos[0])
code_block += first
code_block = dedent(code_block)
if not has_ending_return_stmt:
output_var_str = ', '.join(return_variables)
code_block += 'return ' + output_var_str + '\n'
# Check if we have to raise RefactoringError
_check_for_non_extractables(nodes[:-1] if has_ending_return_stmt else nodes)
decorator = ''
self_param = None
if is_bound_method:
if not function_is_staticmethod(context.tree_node):
function_param_names = context.get_value().get_param_names()
if len(function_param_names):
self_param = function_param_names[0].string_name
params = [p for p in params if p != self_param]
if function_is_classmethod(context.tree_node):
decorator = '@classmethod\n'
else:
code_block += '\n'
function_code = '%sdef %s(%s):\n%s' % (
decorator,
name,
', '.join(params if self_param is None else [self_param] + params),
indent_block(code_block)
)
function_call = '%s(%s)' % (
('' if self_param is None else self_param + '.') + name,
', '.join(params)
)
if is_expression:
replacement = function_call
else:
if has_ending_return_stmt:
replacement = 'return ' + function_call + '\n'
else:
replacement = output_var_str + ' = ' + function_call + '\n'
replacement_dct = _replace(nodes, replacement, function_code, pos,
insert_before_leaf, remaining_prefix)
if not is_expression:
replacement_dct[after_leaf] = second + after_leaf.value
file_to_node_changes = {path: replacement_dct}
return Refactoring(inference_state, file_to_node_changes)
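# Conceptually (the code below is hypothetical): extracting the marked
# expression `a + b` into a function called `add` turns
#
#     result = a + b
#
# into
#
#     def add(a, b):
#         return a + b
#
#     result = add(a, b)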
def _check_for_non_extractables(nodes):
for n in nodes:
try:
children = n.children
except AttributeError:
if n.value == 'return':
raise RefactoringError(
'Can only extract return statements if they are at the end.')
if n.value == 'yield':
raise RefactoringError('Cannot extract yield statements.')
else:
_check_for_non_extractables(children)
def _is_name_input(module_context, names, first, last):
for name in names:
if name.api_type == 'param' or not name.parent_context.is_module():
if name.get_root_context() is not module_context:
return True
if name.start_pos is None or not (first <= name.start_pos < last):
return True
return False
def _find_inputs_and_outputs(module_context, context, nodes):
first = nodes[0].start_pos
last = nodes[-1].end_pos
inputs = []
outputs = []
for name in _find_non_global_names(nodes):
if name.is_definition():
if name not in outputs:
outputs.append(name.value)
else:
if name.value not in inputs:
name_definitions = context.goto(name, name.start_pos)
if not name_definitions \
or _is_name_input(module_context, name_definitions, first, last):
inputs.append(name.value)
# Check if outputs are really needed:
return inputs, outputs
def _find_non_global_names(nodes):
for node in nodes:
try:
children = node.children
except AttributeError:
if node.type == 'name':
yield node
else:
# We only want to check foo in foo.bar
if node.type == 'trailer' and node.children[0] == '.':
continue
for x in _find_non_global_names(children): # Python 2...
yield x
def _get_code_insertion_node(node, is_bound_method):
if not is_bound_method or function_is_staticmethod(node):
while node.parent.type != 'file_input':
node = node.parent
while node.parent.type in ('async_funcdef', 'decorated', 'async_stmt'):
node = node.parent
return node
def _find_needed_output_variables(context, search_node, at_least_pos, return_variables):
"""
Searches everything after at_least_pos in a node and checks if any of the
return_variables are used in there and returns those.
"""
for node in search_node.children:
if node.start_pos < at_least_pos:
continue
return_variables = set(return_variables)
for name in _find_non_global_names([node]):
if not name.is_definition() and name.value in return_variables:
return_variables.remove(name.value)
yield name.value
def _is_node_ending_return_stmt(node):
t = node.type
if t == 'simple_stmt':
return _is_node_ending_return_stmt(node.children[0])
return t == 'return_stmt'

View file

@@ -0,0 +1,29 @@
"""
To use Jedi completion in Python interpreter, add the following in your shell
setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is
not available on Windows. If you still want Jedi autocompletion in your REPL,
just use IPython instead::
export PYTHONSTARTUP="$(python -m jedi repl)"
Then you will be able to use Jedi completer in your Python interpreter::
$ python
Python 2.7.2+ (default, Jul 20 2012, 22:15:08)
[GCC 4.6.1] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import os
>>> os.path.join('a', 'b').split().in<TAB> # doctest: +SKIP
..dex ..sert
"""
import jedi.utils
from jedi import __version__ as __jedi_version__
print('REPL completion using Jedi %s' % __jedi_version__)
jedi.utils.setup_readline(fuzzy=False)
del jedi
# Note: try not to do many things here, as it will contaminate the global
# namespace of the interpreter.

View file

@@ -0,0 +1,109 @@
"""
This module is here for string completions. This means mostly stuff where
strings are returned, like `foo = dict(bar=3); foo["ba` would complete to
`"bar"]`.
However, it does the same for numbers. The difference between string completions
and other completions is mostly that this module doesn't return names defined
in a module, but more or less arbitrary strings.
"""
import re
from jedi._compatibility import unicode
from jedi.inference.names import AbstractArbitraryName
from jedi.inference.helpers import infer_call_of_leaf
from jedi.api.classes import Completion
from jedi.parser_utils import cut_value_at_position
_sentinel = object()
class StringName(AbstractArbitraryName):
api_type = u'string'
is_value_name = False
def complete_dict(module_context, code_lines, leaf, position, string, fuzzy):
bracket_leaf = leaf
if bracket_leaf != '[':
bracket_leaf = leaf.get_previous_leaf()
cut_end_quote = ''
if string:
cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True)
if bracket_leaf == '[':
if string is None and leaf is not bracket_leaf:
string = cut_value_at_position(leaf, position)
context = module_context.create_context(bracket_leaf)
before_bracket_leaf = bracket_leaf.get_previous_leaf()
if before_bracket_leaf.type in ('atom', 'trailer', 'name'):
values = infer_call_of_leaf(context, before_bracket_leaf)
return list(_completions_for_dicts(
module_context.inference_state,
values,
'' if string is None else string,
cut_end_quote,
fuzzy=fuzzy,
))
return []
def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy):
for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)):
dict_key_str = _create_repr_string(literal_string, dict_key)
if dict_key_str.startswith(literal_string):
name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None])
yield Completion(
inference_state,
name,
stack=None,
like_name_length=len(literal_string),
is_fuzzy=fuzzy
)
def _create_repr_string(literal_string, dict_key):
if not isinstance(dict_key, (unicode, bytes)) or not literal_string:
return repr(dict_key)
r = repr(dict_key)
prefix, quote = _get_string_prefix_and_quote(literal_string)
if quote is None:
return r
if quote == r[0]:
return prefix + r
return prefix + quote + r[1:-1] + quote
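# For example (illustrative values only):
#
#     _create_repr_string("'ba", 'bar')  # -> "'bar'" (keeps the single quote)
#     _create_repr_string('"ba', 'bar')  # -> '"bar"' (switches to double quotes)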
def _get_python_keys(dicts):
for dct in dicts:
if dct.array_type == 'dict':
for key in dct.get_key_values():
dict_key = key.get_safe_value(default=_sentinel)
if dict_key is not _sentinel:
yield dict_key
def _get_string_prefix_and_quote(string):
match = re.match(r'(\w*)("""|\'{3}|"|\')', string)
if match is None:
return None, None
return match.group(1), match.group(2)
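# For example (illustrative values only):
#
#     _get_string_prefix_and_quote('rb"foo')  # -> ('rb', '"')
#     _get_string_prefix_and_quote('foo')     # -> (None, None)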
def _matches_quote_at_position(code_lines, quote, position):
string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
return string == quote
def get_quote_ending(string, code_lines, position, invert_result=False):
_, quote = _get_string_prefix_and_quote(string)
if quote is None:
return ''
# Add a quote only if it's not already there.
if _matches_quote_at_position(code_lines, quote, position) != invert_result:
return ''
return quote